400 files changed, 55769 insertions, 0 deletions
diff --git a/Android.bp b/Android.bp new file mode 100644 index 0000000000..836c6ce688 --- /dev/null +++ b/Android.bp @@ -0,0 +1,160 @@ +// +// Copyright © 2017 ARM Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +//////////////////////////////////////////// +// // +// libarmnn.a // +// // +//////////////////////////////////////////// +cc_library_static { + name: "libarmnn", + export_include_dirs: ["include", + "src/armnnUtils"], + local_include_dirs: ["src/armnn"], + srcs: [ + "src/armnnUtils/Logging.cpp", + "src/armnnUtils/Permute.cpp", + "src/armnn/backends/ArmComputeTensorUtils.cpp", + "src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp", + "src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp", + "src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClConstantUint8Workload.cpp", + "src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp", + "src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp", + "src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClMergerUint8Workload.cpp", + "src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClPermuteWorkload.cpp", + "src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.cpp", + "src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.cpp", + "src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.cpp", + "src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.cpp", + "src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.cpp", + "src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonActivationUint8Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonConstantUint8Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp", + "src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp", + 
"src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonMergerUint8Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonPermuteWorkload.cpp", + "src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.cpp", + "src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonPooling2dUint8Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonReshapeUint8Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.cpp", + "src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.cpp", + "src/armnn/backends/ClWorkloadFactory.cpp", + "src/armnn/backends/CpuTensorHandle.cpp", + "src/armnn/backends/RefWorkloadFactory.cpp", + "src/armnn/backends/RefWorkloads/RefMergerUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/FullyConnected.cpp", + "src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/Multiplication.cpp", + "src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.cpp", + "src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/Broadcast.cpp", + "src/armnn/backends/RefWorkloads/Addition.cpp", + "src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/ResizeBilinear.cpp", + "src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefPooling2dUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefFloorFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/ConvImpl.cpp", + "src/armnn/backends/RefWorkloads/Activation.cpp", + "src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefConvolution2dUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefActivationUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/Softmax.cpp", + 
"src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefConstantUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/Pooling2d.cpp", + "src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefMergerFloat32Workload.cpp", + "src/armnn/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp", + "src/armnn/backends/RefWorkloads/RefPermuteWorkload.cpp", + "src/armnn/backends/MemCopyWorkload.cpp", + "src/armnn/backends/WorkloadData.cpp", + "src/armnn/backends/WorkloadFactory.cpp", + "src/armnn/Descriptors.cpp", + "src/armnn/Exceptions.cpp", + "src/armnn/Graph.cpp", + "src/armnn/Optimizer.cpp", + "src/armnn/Runtime.cpp", + "src/armnn/InternalTypes.cpp", + "src/armnn/Layer.cpp", + "src/armnn/Layers.cpp", + "src/armnn/LoadedNetwork.cpp", + "src/armnn/Network.cpp", + "src/armnn/backends/OutputHandler.cpp", + "src/armnn/Profiling.cpp", + "src/armnn/Tensor.cpp", + "src/armnn/Utils.cpp", + "src/armnn/LayerSupport.cpp", + "src/armnn/backends/RefLayerSupport.cpp", + "src/armnn/backends/ClLayerSupport.cpp", + "src/armnn/backends/NeonLayerSupport.cpp", + "src/armnn/backends/NeonWorkloadUtils.cpp", + "src/armnn/backends/NeonWorkloadFactory.cpp" + ], + static_libs: [ + "armnn-arm_compute", + "libboost_log", + "libboost_system", + "libboost_thread"], + stl: "libc++", + cppflags: [ + "-fexceptions", + "-std=c++14", + "-DARMCOMPUTECL_ENABLED", + "-DARMCOMPUTENEON_ENABLED", + "-Wno-unused-parameter", + ], + rtti: true, +} diff --git a/Android.mk b/Android.mk new file mode 100644 index 0000000000..4b97ca99bf --- /dev/null +++ b/Android.mk @@ -0,0 +1,84 @@ +# +# Copyright © 2017 ARM Ltd. All rights reserved. +# See LICENSE file in the project root for full license information. 
+# + +LOCAL_PATH := $(call my-dir) + +# Configure these paths if you move the source or Khronos headers +# +OPENCL_HEADER_PATH := $(LOCAL_PATH)/../../mali/product/khronos/original +NN_HEADER_PATH := $(LOCAL_PATH)/../../../../frameworks/ml/nn/runtime/include +ARMNN_HEADER_PATH := $(LOCAL_PATH)/include +ARMNN_SOURCE_HEADER_PATH := $(LOCAL_PATH)/src/armnn + +include $(CLEAR_VARS) + +LOCAL_C_INCLUDES := \ + $(OPENCL_HEADER_PATH) \ + $(NN_HEADER_PATH) \ + $(ARMNN_HEADER_PATH) \ + $(ARMNN_SOURCE_HEADER_PATH) + +LOCAL_CFLAGS := \ + -std=c++14 \ + -fexceptions \ + -frtti \ + -isystem vendor/arm/android-nn-driver/boost_1_64_0 \ + -DARMCOMPUTECL_ENABLED \ + -DARMCOMPUTENEON_ENABLED + +LOCAL_SRC_FILES := \ + src/armnn/test/UnitTests.cpp \ + src/armnn/test/EndToEndTest.cpp \ + src/armnn/test/UtilsTests.cpp \ + src/armnn/test/GraphTests.cpp \ + src/armnn/test/RuntimeTests.cpp \ + src/armnn/test/TensorTest.cpp \ + src/armnn/test/Network_test.cpp \ + src/armnn/backends/test/IsLayerSupportedTest.cpp \ + src/armnn/backends/test/Reference.cpp \ + src/armnn/backends/test/WorkloadDataValidation.cpp \ + src/armnn/backends/test/TensorCopyUtils.cpp \ + src/armnn/backends/test/LayerTests.cpp \ + src/armnn/backends/test/CreateWorkloadRef.cpp \ + src/armnn/backends/test/ArmComputeCl.cpp \ + src/armnn/backends/test/ArmComputeNeon.cpp \ + src/armnn/backends/test/CreateWorkloadCl.cpp \ + src/armnn/backends/test/CreateWorkloadNeon.cpp \ + src/armnn/backends/test/MemCopyTests.cpp + +LOCAL_STATIC_LIBRARIES := \ + libneuralnetworks_common \ + libarmnn \ + libboost_log \ + libboost_system \ + libboost_unit_test_framework \ + libboost_thread \ + armnn-arm_compute + +LOCAL_SHARED_LIBRARIES := \ + libbase \ + libhidlbase \ + libhidltransport \ + libhidlmemory \ + liblog \ + libutils \ + android.hardware.neuralnetworks@1.0 \ + android.hidl.allocator@1.0 \ + android.hidl.memory@1.0 \ + libOpenCL + +LOCAL_MODULE := armnn-tests + +LOCAL_MODULE_TAGS := eng optional + +LOCAL_ARM_MODE := arm + +# Mark source files as dependent on Android.mk +LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk + +include $(BUILD_EXECUTABLE) + + + diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000000..d09e549106 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,488 @@ +cmake_minimum_required (VERSION 3.0.2) # 3.0.2 required for return() statement used in AddDllCopyCommands.cmake. 
+project(armnn) + +set(additional_cmake_files) +list(APPEND additional_cmake_files + cmake/Utils.cmake + cmake/GlobalConfig.cmake + cmake/AddDllCopyCommands.cmake) + +foreach(cmake_file ${additional_cmake_files}) + include(${cmake_file}) +endforeach() + +if (BUILD_TESTS) + add_subdirectory(tests) +endif() + +# Include the additional cmake files in their own target so that they will appear nicely in IDEs +add_custom_target(AdditionalCMakeFiles SOURCES ${additional_cmake_files}) + +set(armnnUtils_sources) +list(APPEND armnnUtils_sources + src/armnnUtils/GraphTopologicalSort.hpp + src/armnnUtils/Logging.hpp + src/armnnUtils/Permute.hpp + src/armnnUtils/Logging.cpp + src/armnnUtils/Permute.cpp + ) +if(BUILD_CAFFE_PARSER) + list(APPEND armnnUtils_sources + src/armnnUtils/ParserPrototxtFixture.hpp + ) +endif() + +add_library_ex(armnnUtils STATIC ${armnnUtils_sources}) + +if(BUILD_CAFFE_PARSER) + # ArmNN Parser source files required for all build options + set(armnn_caffe_parser_sources) + list(APPEND armnn_caffe_parser_sources + include/armnnCaffeParser/ICaffeParser.hpp + src/armnnCaffeParser/CaffeParser.hpp + src/armnnCaffeParser/CaffeParser.cpp + ${CAFFE_GENERATED_SOURCES}/caffe/proto/caffe.pb.cc + ) + # The generated Caffe protobuf .cc file is not warning clean and we can't fix them. + if(COMPILER_IS_GNU_LIKE) + set_source_files_properties(${CAFFE_GENERATED_SOURCES}/caffe/proto/caffe.pb.cc PROPERTIES COMPILE_FLAGS "-Wno-conversion -Wno-sign-conversion") + endif() + + add_library_ex(armnnCaffeParser SHARED ${armnn_caffe_parser_sources}) + set_target_properties(armnnCaffeParser PROPERTIES COMPILE_FLAGS "${CAFFE_PARSER_ADDITIONAL_COMPILE_FLAGS}") + + target_include_directories(armnnCaffeParser PRIVATE src/armnnUtils) + + target_link_libraries(armnnCaffeParser ${Boost_LOG_LIBRARY} ${Boost_THREAD_LIBRARY} ${Boost_SYSTEM_LIBRARY}) + + target_link_libraries(armnnCaffeParser armnn) + target_link_libraries(armnnCaffeParser ${PROTOBUF_LIBRARIES}) +endif() + +# ArmNN source files required for all build options +list(APPEND armnn_sources + include/armnn/ArmNN.hpp + include/armnn/Descriptors.hpp + include/armnn/DescriptorsFwd.hpp + include/armnn/IRuntime.hpp + include/armnn/INetwork.hpp + include/armnn/Tensor.hpp + include/armnn/TensorFwd.hpp + include/armnn/Types.hpp + include/armnn/TypesUtils.hpp + include/armnn/Utils.hpp + include/armnn/LayerSupport.hpp + include/armnn/Version.hpp + src/armnn/backends/ClWorkloadFactory.hpp + src/armnn/backends/ClWorkloadFactory.cpp + src/armnn/backends/ClLayerSupport.cpp + src/armnn/backends/ClLayerSupport.hpp + src/armnn/backends/CpuTensorHandleFwd.hpp + src/armnn/backends/CpuTensorHandle.hpp + src/armnn/backends/CpuTensorHandle.cpp + src/armnn/backends/RefWorkloadFactory.cpp + src/armnn/backends/RefWorkloadFactory.hpp + src/armnn/backends/RefLayerSupport.cpp + src/armnn/backends/RefLayerSupport.hpp + src/armnn/backends/MakeWorkloadHelper.hpp + src/armnn/backends/NeonWorkloadFactory.cpp + src/armnn/backends/NeonWorkloadFactory.hpp + src/armnn/backends/NeonLayerSupport.cpp + src/armnn/backends/NeonLayerSupport.hpp + src/armnn/backends/Workload.hpp + src/armnn/backends/WorkloadDataFwd.hpp + src/armnn/backends/WorkloadData.hpp + src/armnn/backends/WorkloadData.cpp + src/armnn/backends/WorkloadFactory.hpp + src/armnn/backends/WorkloadFactory.cpp + src/armnn/backends/WorkloadInfo.hpp + src/armnn/backends/MemCopyWorkload.cpp + src/armnn/backends/MemCopyWorkload.hpp + src/armnn/backends/RefWorkloads/Broadcast.hpp + src/armnn/backends/RefWorkloads/Broadcast.cpp + 
src/armnn/backends/RefWorkloads/RefMergerUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefConstantUint8Workload.hpp + src/armnn/backends/RefWorkloads/Addition.hpp + src/armnn/backends/RefWorkloads/ConvImpl.hpp + src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.hpp + src/armnn/backends/RefWorkloads/FullyConnected.cpp + src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.hpp + src/armnn/backends/RefWorkloads/RefConvolution2dUint8Workload.hpp + src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.hpp + src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.hpp + src/armnn/backends/RefWorkloads/Multiplication.cpp + src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.hpp + src/armnn/backends/RefWorkloads/Multiplication.hpp + src/armnn/backends/RefWorkloads/RefActivationUint8Workload.hpp + src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.cpp + src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefWorkloadUtils.hpp + src/armnn/backends/RefWorkloads/RefMergerUint8Workload.hpp + src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.hpp + src/armnn/backends/RefWorkloads/Softmax.hpp + src/armnn/backends/RefWorkloads/RefMergerFloat32Workload.hpp + src/armnn/backends/RefWorkloads/Addition.cpp + src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp + src/armnn/backends/RefWorkloads/TensorBufferArrayView.hpp + src/armnn/backends/RefWorkloads/ResizeBilinear.cpp + src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.hpp + src/armnn/backends/RefWorkloads/Splitter.hpp + src/armnn/backends/RefWorkloads/RefFullyConnectedUint8Workload.hpp + src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.hpp + src/armnn/backends/RefWorkloads/FullyConnected.hpp + src/armnn/backends/RefWorkloads/RefFloorFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefPooling2dUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefFloorFloat32Workload.cpp + src/armnn/backends/RefWorkloads/ConvImpl.cpp + src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.hpp + src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.hpp + src/armnn/backends/RefWorkloads/Activation.cpp + src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp + src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.cpp + 
src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.hpp + src/armnn/backends/RefWorkloads/RefConvolution2dUint8Workload.cpp + src/armnn/backends/RefWorkloads/ResizeBilinear.hpp + src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefActivationUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefPooling2dUint8Workload.hpp + src/armnn/backends/RefWorkloads/BatchNormImpl.hpp + src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.cpp + src/armnn/backends/RefWorkloads/Activation.hpp + src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.cpp + src/armnn/backends/RefWorkloads/Merger.hpp + src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.cpp + src/armnn/backends/RefWorkloads/Softmax.cpp + src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefConstantUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.cpp + src/armnn/backends/RefWorkloads/Pooling2d.cpp + src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp + src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefMergerFloat32Workload.cpp + src/armnn/backends/RefWorkloads/Pooling2d.hpp + src/armnn/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp + src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.hpp + src/armnn/backends/RefWorkloads/RefPermuteWorkload.hpp + src/armnn/backends/RefWorkloads/RefPermuteWorkload.cpp + src/armnn/InternalTypes.hpp + src/armnn/InternalTypes.cpp + src/armnn/LayerFwd.hpp + src/armnn/Layer.hpp + src/armnn/Layer.cpp + src/armnn/LayersFwd.hpp + src/armnn/Layers.hpp + src/armnn/Layers.cpp + src/armnn/Runtime.hpp + src/armnn/Runtime.cpp + src/armnn/Descriptors.cpp + src/armnn/LoadedNetwork.hpp + src/armnn/LoadedNetwork.cpp + src/armnn/Exceptions.cpp + src/armnn/Graph.hpp + src/armnn/Graph.cpp + src/armnn/Network.hpp + src/armnn/Network.cpp + src/armnn/backends/OutputHandler.hpp + src/armnn/backends/OutputHandler.cpp + src/armnn/Profiling.cpp + src/armnn/Tensor.cpp + src/armnn/Utils.cpp + src/armnn/LayerSupport.cpp + src/armnn/LayerSupportCommon.hpp + src/armnn/optimizations/All.hpp + src/armnn/optimizations/MovePermuteUp.hpp + src/armnn/optimizations/Optimization.hpp + src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp + src/armnn/optimizations/OptimizeInversePermutes.hpp + src/armnn/optimizations/PermuteAsReshape.hpp + src/armnn/optimizations/SquashEqualSiblings.hpp + src/armnn/Optimizer.hpp + src/armnn/Optimizer.cpp + ) + +if(ARMCOMPUTENEON) + # Additionally include source files for ARM Compute NEON backend + list(APPEND armnn_sources + src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonActivationUint8Workload.cpp + src/armnn/backends/NeonWorkloads/NeonActivationUint8Workload.hpp + 
src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonBaseConstantWorkload.hpp + src/armnn/backends/NeonWorkloads/NeonBaseMergerWorkload.hpp + src/armnn/backends/NeonWorkloads/NeonBaseSplitterWorkload.hpp + src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonConstantUint8Workload.cpp + src/armnn/backends/NeonWorkloads/NeonConstantUint8Workload.hpp + src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp + src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp + src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp + src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.hpp + src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonMergerUint8Workload.cpp + src/armnn/backends/NeonWorkloads/NeonMergerUint8Workload.hpp + src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonPermuteWorkload.cpp + src/armnn/backends/NeonWorkloads/NeonPermuteWorkload.hpp + src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.cpp + src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.hpp + src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonPooling2dUint8Workload.cpp + src/armnn/backends/NeonWorkloads/NeonPooling2dUint8Workload.hpp + src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonReshapeUint8Workload.cpp + src/armnn/backends/NeonWorkloads/NeonReshapeUint8Workload.hpp + src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp + src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp + src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.cpp + src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.hpp + src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.cpp + 
src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.hpp + src/armnn/backends/NeonWorkloadUtils.cpp + src/armnn/backends/NeonWorkloadUtils.hpp + src/armnn/backends/NeonTensorHandle.hpp) +endif() +if(ARMCOMPUTECL) + # Additionally include source files for ARM Compute OpenCL backend + list(APPEND armnn_sources + src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp + src/armnn/backends/ClWorkloads/ClActivationUint8Workload.hpp + src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp + src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.hpp + src/armnn/backends/ClWorkloads/ClBaseMergerWorkload.hpp + src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClConstantUint8Workload.cpp + src/armnn/backends/ClWorkloads/ClConstantUint8Workload.hpp + src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp + src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.hpp + src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp + src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp + src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionHelper.hpp + src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClMergerUint8Workload.cpp + src/armnn/backends/ClWorkloads/ClMergerUint8Workload.hpp + src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClPermuteWorkload.cpp + src/armnn/backends/ClWorkloads/ClPermuteWorkload.hpp + src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.cpp + src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.hpp + src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.cpp + src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.hpp + src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.cpp + src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.hpp + 
src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.cpp + src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.hpp + src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.cpp + src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.hpp + src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.cpp + src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.hpp + src/armnn/backends/ClWorkloadUtils.hpp + src/armnn/backends/ClTensorHandle.hpp) +endif() +# Files shared by all ARM Compute backends +if(ARMCOMPUTENEON OR ARMCOMPUTECL) + list(APPEND armnn_sources + src/armnn/backends/ArmComputeTensorUtils.hpp + src/armnn/backends/ArmComputeTensorUtils.cpp + src/armnn/backends/ArmComputeUtils.hpp) +endif() + +# Files used for Streamline-based profiling backend +if(PROFILING_BACKEND_STREAMLINE) + list(APPEND armnn_sources + ${GATOR_ROOT}/annotate/streamline_annotate.h + ${GATOR_ROOT}/annotate/streamline_annotate.c) +endif() + +add_library_ex(armnn SHARED ${armnn_sources}) +target_include_directories(armnn PRIVATE src/armnn) +target_include_directories(armnn PRIVATE src/armnnUtils) +target_link_libraries(armnn armnnUtils) +target_link_libraries(armnn ${CMAKE_DL_LIBS}) + +install(TARGETS armnn DESTINATION ${CMAKE_INSTALL_PREFIX}/lib) +if(BUILD_CAFFE_PARSER) + install(TARGETS armnnCaffeParser DESTINATION ${CMAKE_INSTALL_PREFIX}/lib) +endif() +install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_PREFIX}/include) + +target_link_libraries(armnn ${Boost_LOG_LIBRARY} ${Boost_THREAD_LIBRARY} ${Boost_SYSTEM_LIBRARY}) + +if(ARMCOMPUTENEON OR ARMCOMPUTECL) + target_link_libraries(armnn ${ARMCOMPUTE_LIBRARIES}) +endif() +if(ARMCOMPUTECL) + target_link_libraries(armnn ${OPENCL_LIBRARIES}) +endif() + +if(PROFILING_BACKEND_STREAMLINE) + target_link_libraries(armnn pthread) +endif() + + +if(BUILD_UNIT_TESTS) + set(unittest_sources) + list(APPEND unittest_sources + src/armnn/test/UnitTests.cpp + src/armnn/test/UnitTests.hpp + src/armnn/test/EndToEndTest.cpp + src/armnn/test/UtilsTests.cpp + src/armnn/test/GraphTests.cpp + src/armnn/test/RuntimeTests.cpp + src/armnn/test/CreateWorkload.hpp + src/armnn/test/TensorTest.cpp + src/armnn/test/TensorHelpers.hpp + src/armnn/test/Network_test.cpp + src/armnn/test/GraphUtils.hpp + src/armnn/backends/test/IsLayerSupportedTest.cpp + src/armnn/backends/test/IsLayerSupportedTestImpl.hpp + src/armnn/backends/test/Reference.cpp + src/armnn/backends/test/WorkloadDataValidation.cpp + src/armnn/backends/test/TensorCopyUtils.hpp + src/armnn/backends/test/TensorCopyUtils.cpp + src/armnn/backends/test/LayerTests.hpp + src/armnn/backends/test/LayerTests.cpp + src/armnn/backends/test/Conv2dTestImpl.hpp + src/armnn/backends/test/ActivationTestImpl.hpp + src/armnn/backends/test/ActivationFixture.hpp + src/armnn/backends/test/Pooling2dTestImpl.hpp + src/armnn/backends/test/ReshapeTestImpl.hpp + src/armnn/backends/test/PermuteTestImpl.hpp + src/armnn/backends/test/FullyConnectedTestImpl.hpp + src/armnn/backends/test/SplitterTestImpl.hpp + src/armnn/backends/test/NormTestImpl.hpp + src/armnn/backends/test/BatchNormTestImpl.hpp + src/armnn/backends/test/WorkloadTestUtils.hpp + src/armnn/backends/test/CreateWorkloadRef.cpp + src/armnn/backends/test/QuantizeHelper.hpp) + + if(ARMCOMPUTENEON) + list(APPEND unittest_sources + 
src/armnn/backends/test/ArmComputeNeon.cpp + src/armnn/backends/test/CreateWorkloadNeon.cpp + src/armnn/test/CreateWorkloadClNeon.hpp) + endif() + + if(ARMCOMPUTECL) + list(APPEND unittest_sources + src/armnn/backends/test/ArmComputeCl.cpp + src/armnn/backends/test/CreateWorkloadCl.cpp + src/armnn/test/CreateWorkloadClNeon.hpp) + endif() + + if(ARMCOMPUTENEON OR ARMCOMPUTECL) + list(APPEND unittest_sources + src/armnn/backends/test/MemCopyTests.cpp) + endif() + + if(BUILD_CAFFE_PARSER) + list(APPEND unittest_sources + src/armnnCaffeParser/test/TestAdd.cpp + src/armnnCaffeParser/test/TestConcat.cpp + src/armnnCaffeParser/test/TestDropout.cpp + src/armnnCaffeParser/test/TestInputs.cpp + src/armnnCaffeParser/test/TestMul.cpp + src/armnnCaffeParser/test/TestPooling2d.cpp + src/armnnCaffeParser/test/TestInPlace.cpp + src/armnnCaffeParser/test/TestMultiInputsOutputs.cpp + src/armnnCaffeParser/test/TestSplit.cpp + ) + endif() + + add_executable_ex(UnitTests ${unittest_sources}) + target_include_directories(UnitTests PRIVATE src/armnn) + target_include_directories(UnitTests PRIVATE src/armnnUtils) + + CHECK_INCLUDE_FILE(valgrind/memcheck.h VALGRIND_FOUND) + + if(VALGRIND_FOUND) + target_compile_definitions(UnitTests PRIVATE "WITH_VALGRIND=1") + endif() + + target_link_libraries(UnitTests armnn) + target_link_libraries(UnitTests ${CMAKE_THREAD_LIBS_INIT}) + target_link_libraries(UnitTests ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}) + + if(BUILD_CAFFE_PARSER) + target_link_libraries(UnitTests armnnCaffeParser) + endif() + + addDllCopyCommands(UnitTests) +endif() + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..18e83ec163 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) 2017 ARM Limited. + +SPDX-License-Identifier: MIT + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
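CMakeLists.txt above installs the armnn shared library into ${CMAKE_INSTALL_PREFIX}/lib and the public headers from include/ into ${CMAKE_INSTALL_PREFIX}/include. A minimal sketch of how a downstream CMake project might consume that install tree follows; ARMNN_ROOT and the armnn_demo target are illustrative assumptions, not names defined by this commit.

# Sketch only: link a hypothetical application against an installed Arm NN build.
# ARMNN_ROOT is assumed to point at the CMAKE_INSTALL_PREFIX used above.
find_path(ARMNN_INCLUDE armnn/ArmNN.hpp PATHS ${ARMNN_ROOT}/include)
find_library(ARMNN_LIBRARY armnn PATHS ${ARMNN_ROOT}/lib)

add_executable(armnn_demo main.cpp)
target_include_directories(armnn_demo PRIVATE ${ARMNN_INCLUDE})
target_link_libraries(armnn_demo ${ARMNN_LIBRARY})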
diff --git a/README.md b/README.md new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/README.md diff --git a/cmake/AddDllCopyCommands.cmake b/cmake/AddDllCopyCommands.cmake new file mode 100644 index 0000000000..b0f4ee0814 --- /dev/null +++ b/cmake/AddDllCopyCommands.cmake @@ -0,0 +1,74 @@ +macro (addDllCopyCommand target sourceDebug sourceRelease) + add_custom_command(TARGET ${target} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_if_different + "$<$<CONFIG:Debug>:${sourceDebug}>$<$<CONFIG:Release>:${sourceRelease}>$<$<CONFIG:RelWithDebInfo>:${sourceRelease}>$<$<CONFIG:MinSizeRel>:${sourceRelease}>" + $<TARGET_FILE_DIR:${target}>) +endmacro() + +macro (addBoostDllCopyCommand target ignored sourceReleaseLib ignored sourceDebugLib) + string(REGEX REPLACE ".lib$" ".dll" sourceReleaseDll ${sourceReleaseLib}) + string(REGEX REPLACE "/libboost" "/boost" sourceReleaseDll2 ${sourceReleaseDll}) + + string(REGEX REPLACE ".lib$" ".dll" sourceDebugDll ${sourceDebugLib}) + string(REGEX REPLACE "/libboost" "/boost" sourceDebugDll2 ${sourceDebugDll}) + addDllCopyCommand(${target} ${sourceDebugDll2} ${sourceReleaseDll2}) +endmacro() + +# Checks if the given list contains an entry which matches the given regex. +function(listContainsRegex result list regex) + set(${result} 0 PARENT_SCOPE) + foreach(element ${list}) + if(${element} MATCHES ${regex}) + set(${result} 1 PARENT_SCOPE) + return() + endif() + endforeach() +endfunction() + +macro(addDllCopyCommands target) + if(${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC) + # Get the list of dependencies for the given target, so we can copy just the DLLs we need. + get_target_property(target_deps_str ${target} LINK_LIBRARIES) + set(target_deps) + list(APPEND target_deps ${target_deps_str}) + + cmake_policy(SET CMP0057 NEW) # Enable the "IN_LIST" operator + + # armnn.dll + if ("armnn" IN_LIST target_deps) + addDllCopyCommand(${target} "$<TARGET_FILE_DIR:armnn>/armnn.dll" "$<TARGET_FILE_DIR:armnn>/armnn.dll") + endif() + + # armnnCaffeParser.dll + if ("armnnCaffeParser" IN_LIST target_deps) + addDllCopyCommand(${target} "$<TARGET_FILE_DIR:armnnCaffeParser>/armnnCaffeParser.dll" + "$<TARGET_FILE_DIR:armnnCaffeParser>/armnnCaffeParser.dll") + addDllCopyCommand(${target} "${PROTOBUF_ROOT}/bin/libprotobufd.dll" + "${PROTOBUF_ROOT}/bin/libprotobuf.dll") + endif() + + # armnnTfParser.dll + if ("armnnTfParser" IN_LIST target_deps) + addDllCopyCommand(${target} "$<TARGET_FILE_DIR:armnnTfParser>/armnnTfParser.dll" + "$<TARGET_FILE_DIR:armnnTfParser>/armnnTfParser.dll") + addDllCopyCommand(${target} "${PROTOBUF_ROOT}/bin/libprotobufd.dll" + "${PROTOBUF_ROOT}/bin/libprotobuf.dll") + endif() + + # caffe.dll and its dependencies + listContainsRegex(includeCaffeDlls "${target_deps}" "caffe") + if (${includeCaffeDlls}) + addDllCopyCommand(${target} "${CAFFE_BUILD_ROOT}/lib/caffe-d.dll" + "${CAFFE_BUILD_ROOT}/lib/caffe.dll") + addDllCopyCommand(${target} "${PROTOBUF_ROOT}/bin/libprotobufd.dll" + "${PROTOBUF_ROOT}/bin/libprotobuf.dll") + addDllCopyCommand(${target} "${BLAS_ROOT}/bin/libopenblas.dll" "${BLAS_ROOT}/bin/libopenblas.dll") + addDllCopyCommand(${target} "${MINGW32_ROOT}/bin/libgfortran-3.dll" "${MINGW32_ROOT}/bin/libgfortran-3.dll") + addDllCopyCommand(${target} "${MINGW32_ROOT}/bin/libgcc_s_dw2-1.dll" "${MINGW32_ROOT}/bin/libgcc_s_dw2-1.dll") + addDllCopyCommand(${target} "${MINGW32_ROOT}/bin/libquadmath-0.dll" "${MINGW32_ROOT}/bin/libquadmath-0.dll") + addBoostDllCopyCommand(${target} ${Boost_SYSTEM_LIBRARY}) + addBoostDllCopyCommand(${target} 
${Boost_THREAD_LIBRARY}) + addBoostDllCopyCommand(${target} ${Boost_CHRONO_LIBRARY}) + endif() + endif() +endmacro() diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake new file mode 100644 index 0000000000..0ce95a717a --- /dev/null +++ b/cmake/GlobalConfig.cmake @@ -0,0 +1,252 @@ +option(BUILD_CAFFE_PARSER "Build Caffe parser" OFF) +option(BUILD_TF_PARSER "Build Tensorflow parser" OFF) +option(BUILD_UNIT_TESTS "Build unit tests" ON) +option(BUILD_TESTS "Build test applications" OFF) +option(BUILD_FOR_COVERAGE "Use no optimization and output .gcno and .gcda files" OFF) +option(ARMCOMPUTENEON "Build with ARM Compute NEON support" OFF) +option(ARMCOMPUTECL "Build with ARM Compute OpenCL support" OFF) +option(PROFILING "Build with ArmNN built-in profiling support" OFF) +option(PROFILING_BACKEND_STREAMLINE "Forward the armNN profiling events to DS-5/Streamline as annotations" OFF) + +include(SelectLibraryConfigurations) + +set(COMPILER_IS_GNU_LIKE 0) +if(${CMAKE_CXX_COMPILER_ID} STREQUAL GNU OR ${CMAKE_CXX_COMPILER_ID} STREQUAL Clang) + set(COMPILER_IS_GNU_LIKE 1) +endif() + +# Enable CCache if available and not disabled +option(USE_CCACHE "USE_CCACHE" ON) +find_program(CCACHE_FOUND ccache) +if(CCACHE_FOUND AND USE_CCACHE) + get_property(rule_launch_compile DIRECTORY PROPERTY RULE_LAUNCH_COMPILE) + set_property(DIRECTORY PROPERTY RULE_LAUNCH_COMPILE "CCACHE_CPP2=yes ${rule_launch_compile} ccache") +endif() + +# Enable distcc if available and not disabled +option(USE_DISTCC "USE_DISTCC" OFF) +find_program(DISTCC_FOUND distcc) +if(DISTCC_FOUND AND USE_DISTCC) + get_property(rule_launch_compile DIRECTORY PROPERTY RULE_LAUNCH_COMPILE) + set_property(DIRECTORY PROPERTY RULE_LAUNCH_COMPILE "${rule_launch_compile} distcc") +endif() + +# Set to release configuration by default +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "Release") +endif() + +# Compiler flags that are always set +set(CMAKE_POSITION_INDEPENDENT_CODE ON) +if(COMPILER_IS_GNU_LIKE) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion") +elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /MP") + add_definitions(-DNOMINMAX=1 -DNO_STRICT=1) +endif() +if("${CMAKE_SYSTEM_NAME}" STREQUAL Android) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -llog") + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -llog") +endif() + +# Compiler flags for Release builds +set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG") +if(COMPILER_IS_GNU_LIKE) + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3") +elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC) + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MD /O2") +endif() + +# Compiler flags for Debug builds +if(COMPILER_IS_GNU_LIKE) + set(CMAKE_CXX_FLAGS_DEBUG "-g") +elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC) + set(CMAKE_CXX_FLAGS_DEBUG "/MDd /ZI /Od") + # Disable SAFESEH which is necessary for Edit and Continue to work + set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /SAFESEH:NO") + set(CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /SAFESEH:NO") +endif() + +# Modify RelWithDebInfo so that NDEBUG isn't defined. +# This enables asserts. 
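The addDllCopyCommand macro in cmake/AddDllCopyCommands.cmake above uses generator expressions to pick the debug DLL for Debug builds and the release DLL for Release, RelWithDebInfo and MinSizeRel, then copies the chosen file next to the target after each build; addDllCopyCommands walks the target's LINK_LIBRARIES and issues those copies for the dependencies it recognises. A minimal usage sketch, assuming an MSVC build and a hypothetical MyTool executable:

# Sketch only: MyTool is a hypothetical target, not part of this commit.
add_executable(MyTool tool.cpp)

# Copy libprotobufd.dll for Debug and libprotobuf.dll for release-style
# configurations into $<TARGET_FILE_DIR:MyTool> after every build.
addDllCopyCommand(MyTool
    "${PROTOBUF_ROOT}/bin/libprotobufd.dll"
    "${PROTOBUF_ROOT}/bin/libprotobuf.dll")

# Or let the helper inspect LINK_LIBRARIES and copy what it recognises:
addDllCopyCommands(MyTool)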
+if (COMPILER_IS_GNU_LIKE) + string(REPLACE "-DNDEBUG" "" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") +elseif (${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC) + string(REPLACE "/DNDEBUG" "" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") +endif() + +# Compiler flags for code coverage measurements +if(BUILD_FOR_COVERAGE) + if(NOT CMAKE_BUILD_TYPE EQUAL "Debug") + message(WARNING "BUILD_FOR_COVERAGE set so forcing to Debug build") + set(CMAKE_BUILD_TYPE "Debug") + endif() + + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --coverage") +endif() + +if(BUILD_FOR_COVERAGE AND NOT BUILD_UNIT_TESTS) + message(WARNING "BUILD_FOR_COVERAGE set but not BUILD_UNIT_TESTS, so code coverage will not be able to run") +endif() + +set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules ${CMAKE_MODULE_PATH}) + +# Boost +add_definitions("-DBOOST_ALL_NO_LIB") # Turn off auto-linking as we specify the libs manually +set(Boost_USE_STATIC_LIBS ON) +find_package(Boost 1.59 REQUIRED COMPONENTS unit_test_framework system filesystem log program_options) +include_directories(SYSTEM "${Boost_INCLUDE_DIR}") +link_directories(${Boost_LIBRARY_DIR}) + +# pthread +find_package (Threads) + +# Favour the protobuf passed on command line +if(BUILD_TF_PARSER OR BUILD_CAFFE_PARSER) + find_library(PROTOBUF_LIBRARY_DEBUG NAMES "protobufd" + PATHS ${PROTOBUF_ROOT}/lib + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + find_library(PROTOBUF_LIBRARY_DEBUG NAMES "protobufd") + + find_library(PROTOBUF_LIBRARY_RELEASE NAMES "protobuf" + PATHS ${PROTOBUF_ROOT}/lib + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + find_library(PROTOBUF_LIBRARY_RELEASE NAMES "protobuf") + + select_library_configurations(PROTOBUF) + + find_path(PROTOBUF_INCLUDE_DIRS "google/protobuf/message.h" + PATHS ${PROTOBUF_ROOT}/include + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + find_path(PROTOBUF_INCLUDE_DIRS "google/protobuf/message.h") + + include_directories(SYSTEM "${PROTOBUF_INCLUDE_DIRS}") + add_definitions(-DPROTOBUF_USE_DLLS) +endif() + +# Caffe and its dependencies +if(BUILD_CAFFE_PARSER) + add_definitions(-DARMNN_CAFFE_PARSER) + + find_path(CAFFE_GENERATED_SOURCES "caffe/proto/caffe.pb.h" + HINTS ${CAFFE_BUILD_ROOT}/include) + include_directories(SYSTEM "${CAFFE_GENERATED_SOURCES}") +endif() + +if(BUILD_TF_PARSER) + add_definitions(-DARMNN_TF_PARSER) + + find_path(TF_GENERATED_SOURCES "tensorflow/core/protobuf/saved_model.pb.cc") + + # C++ sources generated for tf protobufs + file(GLOB_RECURSE TF_PROTOBUFS "${TF_GENERATED_SOURCES}/*.pb.cc") + + # C++ headers generated for tf protobufs + include_directories(SYSTEM "${TF_GENERATED_SOURCES}") +endif() + + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) + +# ARM Compute +# Note that ARM Compute has a different folder layout depending on the branch but also on +# whether it comes from a prepackaged archive (this is why we add several hints below) +if(ARMCOMPUTENEON OR ARMCOMPUTECL) + find_path(ARMCOMPUTE_INCLUDE arm_compute/core/CL/ICLKernel.h + PATHS ${ARMCOMPUTE_ROOT}/include + PATHS ${ARMCOMPUTE_ROOT}/applications/arm_compute + PATHS ${ARMCOMPUTE_ROOT} + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + find_path(ARMCOMPUTE_INCLUDE arm_compute/core/CL/ICLKernel.h) + include_directories(SYSTEM "${ARMCOMPUTE_INCLUDE}") + + # Find the Arm Compute libraries if not already specified (the user may have already defined this in advance, + # e.g. 
if building clframework as a dependent cmake project) + if (NOT DEFINED ARMCOMPUTE_LIBRARIES) + # We link to the static variant so that customers don't need to find and build a compatible version of clframework. + # First try the folders specified ARMCOMPUTE_BUILD_DIR (with PATH_SUFFIXES for + # Windows builds) + find_library(ARMCOMPUTE_LIBRARY_DEBUG NAMES arm_compute-static + PATHS ${ARMCOMPUTE_BUILD_DIR} + PATH_SUFFIXES "Debug" + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + find_library(ARMCOMPUTE_LIBRARY_RELEASE NAMES arm_compute-static + PATHS ${ARMCOMPUTE_BUILD_DIR} + PATH_SUFFIXES "Release" + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + find_library(ARMCOMPUTE_CORE_LIBRARY_DEBUG NAMES arm_compute_core-static + PATHS ${ARMCOMPUTE_BUILD_DIR} + PATH_SUFFIXES "Debug" + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + find_library(ARMCOMPUTE_CORE_LIBRARY_RELEASE NAMES arm_compute_core-static + PATHS ${ARMCOMPUTE_BUILD_DIR} + PATH_SUFFIXES "Release" + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + + # In case it wasn't there, try a default search (will work in cases where + # the library has been installed into a standard location) + find_library(ARMCOMPUTE_LIBRARY_DEBUG NAMES arm_compute-static) + find_library(ARMCOMPUTE_LIBRARY_RELEASE NAMES arm_compute-static) + find_library(ARMCOMPUTE_CORE_LIBRARY_DEBUG NAMES arm_compute_core-static) + find_library(ARMCOMPUTE_CORE_LIBRARY_RELEASE NAMES arm_compute_core-static) + + set(ARMCOMPUTE_LIBRARIES + debug ${ARMCOMPUTE_LIBRARY_DEBUG} ${ARMCOMPUTE_CORE_LIBRARY_DEBUG} + optimized ${ARMCOMPUTE_LIBRARY_RELEASE} ${ARMCOMPUTE_CORE_LIBRARY_RELEASE} ) + endif() +endif() + +# ARM Compute NEON backend +if(ARMCOMPUTENEON) + # Add preprocessor definition for ARM Compute NEON + add_definitions(-DARMCOMPUTENEON_ENABLED) + # The ARM Compute headers contain some NEON intrinsics, so we need to build armnn with NEON support on armv7 + if(${CMAKE_SYSTEM_PROCESSOR} MATCHES armv7 AND COMPILER_IS_GNU_LIKE) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon") + endif() +endif() + +# ARM Compute OpenCL backend +if(ARMCOMPUTECL) + # Always use Arm compute library OpenCL headers + find_path(OPENCL_INCLUDE CL/cl2.hpp + PATHS ${ARMCOMPUTE_ROOT}/include + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + + find_library(OPENCL_LIBRARIES OpenCL) + if (NOT OPENCL_LIBRARIES) + # Link against libOpenCL in opencl-1.2-stubs, but don't search there at runtime + link_libraries(-L${ARMCOMPUTE_BUILD_DIR}/opencl-1.2-stubs) + set(OPENCL_LIBRARIES OpenCL) + endif() + + include_directories(${OPENCL_INCLUDE}) + + # Add preprocessor definition for ARM Compute OpenCL + add_definitions(-DARMCOMPUTECL_ENABLED) + + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DARM_COMPUTE_DEBUG_ENABLED") +endif() + +# Used by both Arm Compute backends, but should be added +# to the search path after the system directories if necessary +if(ARMCOMPUTENEON OR ARMCOMPUTECL) + find_path(HALF_INCLUDE half/half.hpp) + find_path(HALF_INCLUDE half/half.hpp + PATHS ${ARMCOMPUTE_ROOT}/include + NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + include_directories(${HALF_INCLUDE}) +endif() + +# Built-in profiler +if(PROFILING) + add_definitions(-DARMNN_PROFILING_ENABLED) +endif() + +# Streamline annotate +if(PROFILING_BACKEND_STREAMLINE) + include_directories("${GATOR_ROOT}/annotate") + add_definitions(-DARMNN_STREAMLINE_ENABLED) +endif() + diff --git a/cmake/Utils.cmake b/cmake/Utils.cmake new file mode 100644 index 0000000000..3a9d93a15c --- /dev/null +++ b/cmake/Utils.cmake @@ -0,0 +1,43 @@ +# Function which creates appropriate "source 
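cmake/GlobalConfig.cmake above applies the same idiom to each third-party dependency (protobuf, the Arm Compute libraries): find the debug and release variants separately, first under a caller-supplied root with NO_DEFAULT_PATH and then via a normal search, and let select_library_configurations() fold the results into a single <PREFIX>_LIBRARY / <PREFIX>_LIBRARIES value carrying debug/optimized keywords. A sketch of the idiom for a hypothetical dependency foo, with FOO_ROOT as an assumed cache variable:

include(SelectLibraryConfigurations)

# Prefer the copies under FOO_ROOT, then fall back to the default search paths.
find_library(FOO_LIBRARY_DEBUG NAMES foo
    PATHS ${FOO_ROOT}/lib PATH_SUFFIXES "Debug"
    NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
find_library(FOO_LIBRARY_DEBUG NAMES foo)
find_library(FOO_LIBRARY_RELEASE NAMES foo
    PATHS ${FOO_ROOT}/lib PATH_SUFFIXES "Release"
    NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
find_library(FOO_LIBRARY_RELEASE NAMES foo)

select_library_configurations(FOO)      # sets FOO_LIBRARY and FOO_LIBRARIES
target_link_libraries(armnn ${FOO_LIBRARIES})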
groups" (filter folders in Visual Studio) for the given list of source files +function(createSourceGroups source1) + set(sources ${source1} ${ARGN}) + foreach(source ${sources}) + get_filename_component(source_path ${source} PATH) + string(REPLACE "/" "\\" source_path_backslashes "${source_path}") + source_group(${source_path_backslashes} FILES ${source}) + endforeach() +endfunction() + +# Further processes a target and its list of source files adding extra touches useful for some generators +# (filter folders, group targets in folders, etc.). +# All optional arguments are treated as additional source files. +function(setup_target targetName source1) + set(sources ${source1} ${ARGN}) + + createSourceGroups(${sources}) + + # Enable USE_FOLDERS. This is required by the set_target_properties(... FOLDER ...) call below. + # We prefer to set it here rather than globally at the top of the file so that we only modify + # the Cmake environment if/when the functionality is actually required. + set_property(GLOBAL PROPERTY USE_FOLDERS ON) + file(RELATIVE_PATH projectFolder ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) + set_target_properties(${targetName} PROPERTIES FOLDER "${projectFolder}") +endfunction() + +# Convenience replacement of add_executable(), which besides adding an executable to the project +# further configures the target via setup_target(). +# All optional arguments are treated as additional source files. +function(add_executable_ex targetName source1) + set(sources ${source1} ${ARGN}) + add_executable(${targetName} ${sources}) + setup_target(${targetName} ${sources}) +endfunction() + +# Convenience replacement of add_library(), which besides adding a library to the project +# further configures the target via setup_target(). +# All optional arguments are treated as additional source files. +function(add_library_ex targetName libraryType source1) + set(sources ${source1} ${ARGN}) + add_library(${targetName} ${libraryType} ${sources}) + setup_target(${targetName} ${sources}) +endfunction() diff --git a/cmake/modules/FindBoost.cmake b/cmake/modules/FindBoost.cmake new file mode 100644 index 0000000000..cf08f9b882 --- /dev/null +++ b/cmake/modules/FindBoost.cmake @@ -0,0 +1,1893 @@ +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing for details. + +#.rst: +# FindBoost +# --------- +# +# Find Boost include dirs and libraries +# +# Use this module by invoking find_package with the form:: +# +# find_package(Boost +# [version] [EXACT] # Minimum or EXACT version e.g. 1.36.0 +# [REQUIRED] # Fail with error if Boost is not found +# [COMPONENTS <libs>...] # Boost libraries by their canonical name +# ) # e.g. "date_time" for "libboost_date_time" +# +# This module finds headers and requested component libraries OR a CMake +# package configuration file provided by a "Boost CMake" build. For the +# latter case skip to the "Boost CMake" section below. 
For the former +# case results are reported in variables:: +# +# Boost_FOUND - True if headers and requested libraries were found +# Boost_INCLUDE_DIRS - Boost include directories +# Boost_LIBRARY_DIRS - Link directories for Boost libraries +# Boost_LIBRARIES - Boost component libraries to be linked +# Boost_<C>_FOUND - True if component <C> was found (<C> is upper-case) +# Boost_<C>_LIBRARY - Libraries to link for component <C> (may include +# target_link_libraries debug/optimized keywords) +# Boost_VERSION - BOOST_VERSION value from boost/version.hpp +# Boost_LIB_VERSION - Version string appended to library filenames +# Boost_MAJOR_VERSION - Boost major version number (X in X.y.z) +# Boost_MINOR_VERSION - Boost minor version number (Y in x.Y.z) +# Boost_SUBMINOR_VERSION - Boost subminor version number (Z in x.y.Z) +# Boost_LIB_DIAGNOSTIC_DEFINITIONS (Windows) +# - Pass to add_definitions() to have diagnostic +# information about Boost's automatic linking +# displayed during compilation +# +# This module reads hints about search locations from variables:: +# +# BOOST_ROOT - Preferred installation prefix +# (or BOOSTROOT) +# BOOST_INCLUDEDIR - Preferred include directory e.g. <prefix>/include +# BOOST_LIBRARYDIR - Preferred library directory e.g. <prefix>/lib +# Boost_NO_SYSTEM_PATHS - Set to ON to disable searching in locations not +# specified by these hint variables. Default is OFF. +# Boost_ADDITIONAL_VERSIONS +# - List of Boost versions not known to this module +# (Boost install locations may contain the version) +# +# and saves search results persistently in CMake cache entries:: +# +# Boost_INCLUDE_DIR - Directory containing Boost headers +# Boost_LIBRARY_DIR_RELEASE - Directory containing release Boost libraries +# Boost_LIBRARY_DIR_DEBUG - Directory containing debug Boost libraries +# Boost_<C>_LIBRARY_DEBUG - Component <C> library debug variant +# Boost_<C>_LIBRARY_RELEASE - Component <C> library release variant +# +# The following :prop_tgt:`IMPORTED` targets are also defined:: +# +# Boost::boost - Target for header-only dependencies +# (Boost include directory) +# Boost::<C> - Target for specific component dependency +# (shared or static library); <C> is lower- +# case +# Boost::diagnostic_definitions - interface target to enable diagnostic +# information about Boost's automatic linking +# during compilation (adds BOOST_LIB_DIAGNOSTIC) +# Boost::disable_autolinking - interface target to disable automatic +# linking with MSVC (adds BOOST_ALL_NO_LIB) +# Boost::dynamic_linking - interface target to enable dynamic linking +# linking with MSVC (adds BOOST_ALL_DYN_LINK) +# +# Implicit dependencies such as Boost::filesystem requiring +# Boost::system will be automatically detected and satisfied, even +# if system is not specified when using find_package and if +# Boost::system is not added to target_link_libraries. If using +# Boost::thread, then Thread::Thread will also be added automatically. +# +# It is important to note that the imported targets behave differently +# than variables created by this module: multiple calls to +# find_package(Boost) in the same directory or sub-directories with +# different options (e.g. static or shared) will not override the +# values of the targets created by the first call. +# +# Users may set these hints or results as cache entries. Projects +# should not read these entries directly but instead use the above +# result variables. Note that some hint names start in upper-case +# "BOOST". 
One may specify these as environment variables if they are +# not specified as CMake variables or cache entries. +# +# This module first searches for the Boost header files using the above +# hint variables (excluding BOOST_LIBRARYDIR) and saves the result in +# Boost_INCLUDE_DIR. Then it searches for requested component libraries +# using the above hints (excluding BOOST_INCLUDEDIR and +# Boost_ADDITIONAL_VERSIONS), "lib" directories near Boost_INCLUDE_DIR, +# and the library name configuration settings below. It saves the +# library directories in Boost_LIBRARY_DIR_DEBUG and +# Boost_LIBRARY_DIR_RELEASE and individual library +# locations in Boost_<C>_LIBRARY_DEBUG and Boost_<C>_LIBRARY_RELEASE. +# When one changes settings used by previous searches in the same build +# tree (excluding environment variables) this module discards previous +# search results affected by the changes and searches again. +# +# Boost libraries come in many variants encoded in their file name. +# Users or projects may tell this module which variant to find by +# setting variables:: +# +# Boost_USE_MULTITHREADED - Set to OFF to use the non-multithreaded +# libraries ('mt' tag). Default is ON. +# Boost_USE_STATIC_LIBS - Set to ON to force the use of the static +# libraries. Default is OFF. +# Boost_USE_STATIC_RUNTIME - Set to ON or OFF to specify whether to use +# libraries linked statically to the C++ runtime +# ('s' tag). Default is platform dependent. +# Boost_USE_DEBUG_RUNTIME - Set to ON or OFF to specify whether to use +# libraries linked to the MS debug C++ runtime +# ('g' tag). Default is ON. +# Boost_USE_DEBUG_PYTHON - Set to ON to use libraries compiled with a +# debug Python build ('y' tag). Default is OFF. +# Boost_USE_STLPORT - Set to ON to use libraries compiled with +# STLPort ('p' tag). Default is OFF. +# Boost_USE_STLPORT_DEPRECATED_NATIVE_IOSTREAMS +# - Set to ON to use libraries compiled with +# STLPort deprecated "native iostreams" +# ('n' tag). Default is OFF. +# Boost_COMPILER - Set to the compiler-specific library suffix +# (e.g. "-gcc43"). Default is auto-computed +# for the C++ compiler in use. A list may be +# used if multiple compatible suffixes should +# be tested for, in decreasing order of +# preference. +# Boost_THREADAPI - Suffix for "thread" component library name, +# such as "pthread" or "win32". Names with +# and without this suffix will both be tried. +# Boost_NAMESPACE - Alternate namespace used to build boost with +# e.g. if set to "myboost", will search for +# myboost_thread instead of boost_thread. +# +# Other variables one may set to control this module are:: +# +# Boost_DEBUG - Set to ON to enable debug output from FindBoost. +# Please enable this before filing any bug report. +# Boost_DETAILED_FAILURE_MSG +# - Set to ON to add detailed information to the +# failure message even when the REQUIRED option +# is not given to the find_package call. +# Boost_REALPATH - Set to ON to resolve symlinks for discovered +# libraries to assist with packaging. For example, +# the "system" component library may be resolved to +# "/usr/lib/libboost_system.so.1.42.0" instead of +# "/usr/lib/libboost_system.so". This does not +# affect linking and should not be enabled unless +# the user needs this information. +# Boost_LIBRARY_DIR - Default value for Boost_LIBRARY_DIR_RELEASE and +# Boost_LIBRARY_DIR_DEBUG. +# +# On Visual Studio and Borland compilers Boost headers request automatic +# linking to corresponding libraries. 
This requires matching libraries +# to be linked explicitly or available in the link library search path. +# In this case setting Boost_USE_STATIC_LIBS to OFF may not achieve +# dynamic linking. Boost automatic linking typically requests static +# libraries with a few exceptions (such as Boost.Python). Use:: +# +# add_definitions(${Boost_LIB_DIAGNOSTIC_DEFINITIONS}) +# +# to ask Boost to report information about automatic linking requests. +# +# Example to find Boost headers only:: +# +# find_package(Boost 1.36.0) +# if(Boost_FOUND) +# include_directories(${Boost_INCLUDE_DIRS}) +# add_executable(foo foo.cc) +# endif() +# +# Example to find Boost libraries and use imported targets:: +# +# find_package(Boost 1.56 REQUIRED COMPONENTS +# date_time filesystem iostreams) +# add_executable(foo foo.cc) +# target_link_libraries(foo Boost::date_time Boost::filesystem +# Boost::iostreams) +# +# Example to find Boost headers and some *static* libraries:: +# +# set(Boost_USE_STATIC_LIBS ON) # only find static libs +# set(Boost_USE_MULTITHREADED ON) +# set(Boost_USE_STATIC_RUNTIME OFF) +# find_package(Boost 1.36.0 COMPONENTS date_time filesystem system ...) +# if(Boost_FOUND) +# include_directories(${Boost_INCLUDE_DIRS}) +# add_executable(foo foo.cc) +# target_link_libraries(foo ${Boost_LIBRARIES}) +# endif() +# +# Boost CMake +# ^^^^^^^^^^^ +# +# If Boost was built using the boost-cmake project it provides a package +# configuration file for use with find_package's Config mode. This +# module looks for the package configuration file called +# BoostConfig.cmake or boost-config.cmake and stores the result in cache +# entry "Boost_DIR". If found, the package configuration file is loaded +# and this module returns with no further action. See documentation of +# the Boost CMake package configuration for details on what it provides. +# +# Set Boost_NO_BOOST_CMAKE to ON to disable the search for boost-cmake. + +#------------------------------------------------------------------------------- +# Before we go searching, check whether boost-cmake is available, unless the +# user specifically asked NOT to search for boost-cmake. +# +# If Boost_DIR is set, this behaves as any find_package call would. If not, +# it looks at BOOST_ROOT and BOOSTROOT to find Boost. +# +if (NOT Boost_NO_BOOST_CMAKE) + # If Boost_DIR is not set, look for BOOSTROOT and BOOST_ROOT as alternatives, + # since these are more conventional for Boost. + if ("$ENV{Boost_DIR}" STREQUAL "") + if (NOT "$ENV{BOOST_ROOT}" STREQUAL "") + set(ENV{Boost_DIR} $ENV{BOOST_ROOT}) + elseif (NOT "$ENV{BOOSTROOT}" STREQUAL "") + set(ENV{Boost_DIR} $ENV{BOOSTROOT}) + endif() + endif() + + # Do the same find_package call but look specifically for the CMake version. + # Note that args are passed in the Boost_FIND_xxxxx variables, so there is no + # need to delegate them to this find_package call. + find_package(Boost QUIET NO_MODULE) + mark_as_advanced(Boost_DIR) + + # If we found boost-cmake, then we're done. Print out what we found. + # Otherwise let the rest of the module try to find it. + if (Boost_FOUND) + message("Boost ${Boost_FIND_VERSION} found.") + if (Boost_FIND_COMPONENTS) + message("Found Boost components:") + message(" ${Boost_FIND_COMPONENTS}") + endif() + return() + endif() +endif() + + +#------------------------------------------------------------------------------- +# FindBoost functions & macros +# + +############################################ +# +# Check the existence of the libraries. 
+# +############################################ +# This macro was taken directly from the FindQt4.cmake file that is included +# with the CMake distribution. This is NOT my work. All work was done by the +# original authors of the FindQt4.cmake file. Only minor modifications were +# made to remove references to Qt and make this file more generally applicable +# And ELSE/ENDIF pairs were removed for readability. +######################################################################### + +macro(_Boost_ADJUST_LIB_VARS basename) + if(Boost_INCLUDE_DIR ) + if(Boost_${basename}_LIBRARY_DEBUG AND Boost_${basename}_LIBRARY_RELEASE) + # if the generator supports configuration types then set + # optimized and debug libraries, or if the CMAKE_BUILD_TYPE has a value + if(CMAKE_CONFIGURATION_TYPES OR CMAKE_BUILD_TYPE) + set(Boost_${basename}_LIBRARY optimized ${Boost_${basename}_LIBRARY_RELEASE} debug ${Boost_${basename}_LIBRARY_DEBUG}) + else() + # if there are no configuration types and CMAKE_BUILD_TYPE has no value + # then just use the release libraries + set(Boost_${basename}_LIBRARY ${Boost_${basename}_LIBRARY_RELEASE} ) + endif() + # FIXME: This probably should be set for both cases + set(Boost_${basename}_LIBRARIES optimized ${Boost_${basename}_LIBRARY_RELEASE} debug ${Boost_${basename}_LIBRARY_DEBUG}) + endif() + + # if only the release version was found, set the debug variable also to the release version + if(Boost_${basename}_LIBRARY_RELEASE AND NOT Boost_${basename}_LIBRARY_DEBUG) + set(Boost_${basename}_LIBRARY_DEBUG ${Boost_${basename}_LIBRARY_RELEASE}) + set(Boost_${basename}_LIBRARY ${Boost_${basename}_LIBRARY_RELEASE}) + set(Boost_${basename}_LIBRARIES ${Boost_${basename}_LIBRARY_RELEASE}) + endif() + + # if only the debug version was found, set the release variable also to the debug version + if(Boost_${basename}_LIBRARY_DEBUG AND NOT Boost_${basename}_LIBRARY_RELEASE) + set(Boost_${basename}_LIBRARY_RELEASE ${Boost_${basename}_LIBRARY_DEBUG}) + set(Boost_${basename}_LIBRARY ${Boost_${basename}_LIBRARY_DEBUG}) + set(Boost_${basename}_LIBRARIES ${Boost_${basename}_LIBRARY_DEBUG}) + endif() + + # If the debug & release library ends up being the same, omit the keywords + if(${Boost_${basename}_LIBRARY_RELEASE} STREQUAL ${Boost_${basename}_LIBRARY_DEBUG}) + set(Boost_${basename}_LIBRARY ${Boost_${basename}_LIBRARY_RELEASE} ) + set(Boost_${basename}_LIBRARIES ${Boost_${basename}_LIBRARY_RELEASE} ) + endif() + + if(Boost_${basename}_LIBRARY AND Boost_${basename}_HEADER) + set(Boost_${basename}_FOUND ON) + endif() + + endif() + # Make variables changeable to the advanced user + mark_as_advanced( + Boost_${basename}_LIBRARY_RELEASE + Boost_${basename}_LIBRARY_DEBUG + ) +endmacro() + +# Detect changes in used variables. +# Compares the current variable value with the last one. +# In short form: +# v != v_LAST -> CHANGED = 1 +# v is defined, v_LAST not -> CHANGED = 1 +# v is not defined, but v_LAST is -> CHANGED = 1 +# otherwise -> CHANGED = 0 +# CHANGED is returned in variable named ${changed_var} +macro(_Boost_CHANGE_DETECT changed_var) + set(${changed_var} 0) + foreach(v ${ARGN}) + if(DEFINED _Boost_COMPONENTS_SEARCHED) + if(${v}) + if(_${v}_LAST) + string(COMPARE NOTEQUAL "${${v}}" "${_${v}_LAST}" _${v}_CHANGED) + else() + set(_${v}_CHANGED 1) + endif() + elseif(_${v}_LAST) + set(_${v}_CHANGED 1) + endif() + if(_${v}_CHANGED) + set(${changed_var} 1) + endif() + else() + set(_${v}_CHANGED 0) + endif() + endforeach() +endmacro() + +# +# Find the given library (var). 
+# Use 'build_type' to support different lib paths for RELEASE or DEBUG builds +# +macro(_Boost_FIND_LIBRARY var build_type) + + find_library(${var} ${ARGN}) + + if(${var}) + # If this is the first library found then save Boost_LIBRARY_DIR_[RELEASE,DEBUG]. + if(NOT Boost_LIBRARY_DIR_${build_type}) + get_filename_component(_dir "${${var}}" PATH) + set(Boost_LIBRARY_DIR_${build_type} "${_dir}" CACHE PATH "Boost library directory ${build_type}" FORCE) + endif() + elseif(_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT) + # Try component-specific hints but do not save Boost_LIBRARY_DIR_[RELEASE,DEBUG]. + find_library(${var} HINTS ${_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT} ${ARGN}) + endif() + + # If Boost_LIBRARY_DIR_[RELEASE,DEBUG] is known then search only there. + if(Boost_LIBRARY_DIR_${build_type}) + set(_boost_LIBRARY_SEARCH_DIRS_${build_type} ${Boost_LIBRARY_DIR_${build_type}} NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " Boost_LIBRARY_DIR_${build_type} = ${Boost_LIBRARY_DIR_${build_type}}" + " _boost_LIBRARY_SEARCH_DIRS_${build_type} = ${_boost_LIBRARY_SEARCH_DIRS_${build_type}}") + endif() + endif() +endmacro() + +#------------------------------------------------------------------------------- + +# +# Runs compiler with "-dumpversion" and parses major/minor +# version with a regex. +# +function(_Boost_COMPILER_DUMPVERSION _OUTPUT_VERSION) + + exec_program(${CMAKE_CXX_COMPILER} + ARGS ${CMAKE_CXX_COMPILER_ARG1} -dumpversion + OUTPUT_VARIABLE _boost_COMPILER_VERSION + ) + string(REGEX REPLACE "([0-9])\\.([0-9])(\\.[0-9])?" "\\1\\2" + _boost_COMPILER_VERSION ${_boost_COMPILER_VERSION}) + + set(${_OUTPUT_VERSION} ${_boost_COMPILER_VERSION} PARENT_SCOPE) +endfunction() + +# +# Take a list of libraries with "thread" in it +# and prepend duplicates with "thread_${Boost_THREADAPI}" +# at the front of the list +# +function(_Boost_PREPEND_LIST_WITH_THREADAPI _output) + set(_orig_libnames ${ARGN}) + string(REPLACE "thread" "thread_${Boost_THREADAPI}" _threadapi_libnames "${_orig_libnames}") + set(${_output} ${_threadapi_libnames} ${_orig_libnames} PARENT_SCOPE) +endfunction() + +# +# If a library is found, replace its cache entry with its REALPATH +# +function(_Boost_SWAP_WITH_REALPATH _library _docstring) + if(${_library}) + get_filename_component(_boost_filepathreal ${${_library}} REALPATH) + unset(${_library} CACHE) + set(${_library} ${_boost_filepathreal} CACHE FILEPATH "${_docstring}") + endif() +endfunction() + +function(_Boost_CHECK_SPELLING _var) + if(${_var}) + string(TOUPPER ${_var} _var_UC) + message(FATAL_ERROR "ERROR: ${_var} is not the correct spelling. 
The proper spelling is ${_var_UC}.") + endif() +endfunction() + +# Guesses Boost's compiler prefix used in built library names +# Returns the guess by setting the variable pointed to by _ret +function(_Boost_GUESS_COMPILER_PREFIX _ret) + if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel" + OR CMAKE_CXX_COMPILER MATCHES "icl" + OR CMAKE_CXX_COMPILER MATCHES "icpc") + if(WIN32) + set (_boost_COMPILER "-iw") + else() + set (_boost_COMPILER "-il") + endif() + elseif (GHSMULTI) + set(_boost_COMPILER "-ghs") + elseif("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xMSVC") + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.10) + set(_boost_COMPILER "-vc141;-vc140") + elseif (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19) + set(_boost_COMPILER "-vc140") + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 18) + set(_boost_COMPILER "-vc120") + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 17) + set(_boost_COMPILER "-vc110") + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16) + set(_boost_COMPILER "-vc100") + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 15) + set(_boost_COMPILER "-vc90") + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14) + set(_boost_COMPILER "-vc80") + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 13.10) + set(_boost_COMPILER "-vc71") + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 13) # Good luck! + set(_boost_COMPILER "-vc7") # yes, this is correct + else() # VS 6.0 Good luck! + set(_boost_COMPILER "-vc6") # yes, this is correct + endif() + elseif (BORLAND) + set(_boost_COMPILER "-bcb") + elseif(CMAKE_CXX_COMPILER_ID STREQUAL "SunPro") + set(_boost_COMPILER "-sw") + elseif(CMAKE_CXX_COMPILER_ID STREQUAL "XL") + set(_boost_COMPILER "-xlc") + elseif (MINGW) + if(${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION} VERSION_LESS 1.34) + set(_boost_COMPILER "-mgw") # no GCC version encoding prior to 1.34 + else() + _Boost_COMPILER_DUMPVERSION(_boost_COMPILER_VERSION) + set(_boost_COMPILER "-mgw${_boost_COMPILER_VERSION}") + endif() + elseif (UNIX) + if (CMAKE_COMPILER_IS_GNUCXX) + if(${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION} VERSION_LESS 1.34) + set(_boost_COMPILER "-gcc") # no GCC version encoding prior to 1.34 + else() + _Boost_COMPILER_DUMPVERSION(_boost_COMPILER_VERSION) + # Determine which version of GCC we have. + if(APPLE) + if(Boost_MINOR_VERSION) + if(${Boost_MINOR_VERSION} GREATER 35) + # In Boost 1.36.0 and newer, the mangled compiler name used + # on Mac OS X/Darwin is "xgcc". + set(_boost_COMPILER "-xgcc${_boost_COMPILER_VERSION}") + else() + # In Boost <= 1.35.0, there is no mangled compiler name for + # the Mac OS X/Darwin version of GCC. + set(_boost_COMPILER "") + endif() + else() + # We don't know the Boost version, so assume it's + # pre-1.36.0. + set(_boost_COMPILER "") + endif() + else() + set(_boost_COMPILER "-gcc${_boost_COMPILER_VERSION}") + endif() + endif() + endif () + else() + # TODO at least Boost_DEBUG here? + set(_boost_COMPILER "") + endif() + set(${_ret} ${_boost_COMPILER} PARENT_SCOPE) +endfunction() + +# +# Get component dependencies. Requires the dependencies to have been +# defined for the Boost release version. +# +# component - the component to check +# _ret - list of library dependencies +# +function(_Boost_COMPONENT_DEPENDENCIES component _ret) + # Note: to add a new Boost release, run + # + # % cmake -DBOOST_DIR=/path/to/boost/source -P Utilities/Scripts/BoostScanDeps.cmake + # + # The output may be added in a new block below. 
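+  #
+  # (Purely as a sketch; 1.65 is a hypothetical future release here.) A newly
+  # added block is guarded by a half-open version range in the style used
+  # below, e.g.:
+  #
+  #   elseif(NOT Boost_VERSION VERSION_LESS 106500 AND Boost_VERSION VERSION_LESS 106600)
+  #     set(_Boost_FILESYSTEM_DEPENDENCIES system)
+  #     ...
+  #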
If it's the same as + # the previous release, simply update the version range of the block + # for the previous release. Also check if any new components have + # been added, and add any new components to + # _Boost_COMPONENT_HEADERS. + # + # This information was originally generated by running + # BoostScanDeps.cmake against every boost release to date supported + # by FindBoost: + # + # % for version in /path/to/boost/sources/* + # do + # cmake -DBOOST_DIR=$version -P Utilities/Scripts/BoostScanDeps.cmake + # done + # + # The output was then updated by search and replace with these regexes: + # + # - Strip message(STATUS) prefix dashes + # s;^-- ;; + # - Indent + # s;^set(; set(;; + # - Add conditionals + # s;Scanning /path/to/boost/sources/boost_\(.*\)_\(.*\)_\(.*); elseif(NOT Boost_VERSION VERSION_LESS \10\20\3 AND Boost_VERSION VERSION_LESS xxxx); + # + # This results in the logic seen below, but will require the xxxx + # replacing with the following Boost release version (or the next + # minor version to be released, e.g. 1.59 was the latest at the time + # of writing, making 1.60 the next, so 106000 is the needed version + # number). Identical consecutive releases were then merged together + # by updating the end range of the first block and removing the + # following redundant blocks. + # + # Running the script against all historical releases should be + # required only if the BoostScanDeps.cmake script logic is changed. + # The addition of a new release should only require it to be run + # against the new release. + set(_Boost_IMPORTED_TARGETS TRUE) + if(NOT Boost_VERSION VERSION_LESS 103300 AND Boost_VERSION VERSION_LESS 103500) + set(_Boost_IOSTREAMS_DEPENDENCIES regex thread) + set(_Boost_REGEX_DEPENDENCIES thread) + set(_Boost_WAVE_DEPENDENCIES filesystem thread) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 103500 AND Boost_VERSION VERSION_LESS 103600) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_WAVE_DEPENDENCIES filesystem system thread) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 103600 AND Boost_VERSION VERSION_LESS 103800) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_WAVE_DEPENDENCIES filesystem system thread) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 103800 AND Boost_VERSION VERSION_LESS 104300) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES date_time) + set(_Boost_WAVE_DEPENDENCIES filesystem system thread date_time) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 104300 AND Boost_VERSION VERSION_LESS 104400) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random) + 
set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES date_time) + set(_Boost_WAVE_DEPENDENCIES filesystem system thread date_time) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 104400 AND Boost_VERSION VERSION_LESS 104500) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random serialization) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES date_time) + set(_Boost_WAVE_DEPENDENCIES serialization filesystem system thread date_time) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 104500 AND Boost_VERSION VERSION_LESS 104700) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES date_time) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread date_time) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 104700 AND Boost_VERSION VERSION_LESS 104800) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES date_time) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread date_time) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 104800 AND Boost_VERSION VERSION_LESS 105000) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES date_time) + set(_Boost_TIMER_DEPENDENCIES chrono system) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread date_time) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 105000 AND Boost_VERSION VERSION_LESS 105300) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l regex random) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time) + set(_Boost_TIMER_DEPENDENCIES chrono system) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 105300 AND Boost_VERSION VERSION_LESS 105400) + set(_Boost_ATOMIC_DEPENDENCIES thread chrono system date_time) + set(_Boost_CHRONO_DEPENDENCIES system) + 
set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l regex random) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) + set(_Boost_TIMER_DEPENDENCIES chrono system) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 105400 AND Boost_VERSION VERSION_LESS 105500) + set(_Boost_ATOMIC_DEPENDENCIES thread chrono system date_time) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_LOG_DEPENDENCIES log_setup date_time system filesystem thread regex chrono) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l regex random) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) + set(_Boost_TIMER_DEPENDENCIES chrono system) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 105500 AND Boost_VERSION VERSION_LESS 105600) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_COROUTINE_DEPENDENCIES context system) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_LOG_DEPENDENCIES log_setup date_time system filesystem thread regex chrono) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l regex random) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) + set(_Boost_TIMER_DEPENDENCIES chrono system) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 105600 AND Boost_VERSION VERSION_LESS 105900) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_COROUTINE_DEPENDENCIES context system) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_LOG_DEPENDENCIES log_setup date_time system filesystem thread regex chrono) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_RANDOM_DEPENDENCIES system) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) + set(_Boost_TIMER_DEPENDENCIES chrono system) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 105900 AND Boost_VERSION VERSION_LESS 106000) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_COROUTINE_DEPENDENCIES context system) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_LOG_DEPENDENCIES log_setup date_time system filesystem thread regex chrono atomic) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) + 
set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_RANDOM_DEPENDENCIES system) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) + set(_Boost_TIMER_DEPENDENCIES chrono system) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 106000 AND Boost_VERSION VERSION_LESS 106100) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_COROUTINE_DEPENDENCIES context system) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_RANDOM_DEPENDENCIES system) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) + set(_Boost_TIMER_DEPENDENCIES chrono system) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 106100 AND Boost_VERSION VERSION_LESS 106200) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_CONTEXT_DEPENDENCIES thread chrono system date_time) + set(_Boost_COROUTINE_DEPENDENCIES context system) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_RANDOM_DEPENDENCIES system) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 106200 AND Boost_VERSION VERSION_LESS 106300) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_CONTEXT_DEPENDENCIES thread chrono system date_time) + set(_Boost_COROUTINE_DEPENDENCIES context system) + set(_Boost_FIBER_DEPENDENCIES context thread chrono system date_time) + set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_RANDOM_DEPENDENCIES system) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + elseif(NOT Boost_VERSION VERSION_LESS 106300 AND Boost_VERSION VERSION_LESS 106500) + set(_Boost_CHRONO_DEPENDENCIES system) + set(_Boost_CONTEXT_DEPENDENCIES thread chrono system date_time) + set(_Boost_COROUTINE_DEPENDENCIES context system) + set(_Boost_COROUTINE2_DEPENDENCIES context fiber thread chrono system date_time) + set(_Boost_FIBER_DEPENDENCIES context thread chrono system date_time) + 
set(_Boost_FILESYSTEM_DEPENDENCIES system) + set(_Boost_IOSTREAMS_DEPENDENCIES regex) + set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) + set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) + set(_Boost_MPI_DEPENDENCIES serialization) + set(_Boost_MPI_PYTHON_DEPENDENCIES python mpi serialization) + set(_Boost_RANDOM_DEPENDENCIES system) + set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) + set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) + set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) + else() + message(WARNING "Imported targets not available for Boost version ${Boost_VERSION}") + set(_Boost_IMPORTED_TARGETS FALSE) + endif() + + string(TOUPPER ${component} uppercomponent) + set(${_ret} ${_Boost_${uppercomponent}_DEPENDENCIES} PARENT_SCOPE) + set(_Boost_IMPORTED_TARGETS ${_Boost_IMPORTED_TARGETS} PARENT_SCOPE) + + string(REGEX REPLACE ";" " " _boost_DEPS_STRING "${_Boost_${uppercomponent}_DEPENDENCIES}") + if (NOT _boost_DEPS_STRING) + set(_boost_DEPS_STRING "(none)") + endif() + # message(STATUS "Dependencies for Boost::${component}: ${_boost_DEPS_STRING}") +endfunction() + +# +# Get component headers. This is the primary header (or headers) for +# a given component, and is used to check that the headers are present +# as well as the library itself as an extra sanity check of the build +# environment. +# +# component - the component to check +# _hdrs +# +function(_Boost_COMPONENT_HEADERS component _hdrs) + # Note: new boost components will require adding here. The header + # must be present in all versions of Boost providing a library. + set(_Boost_ATOMIC_HEADERS "boost/atomic.hpp") + set(_Boost_CHRONO_HEADERS "boost/chrono.hpp") + set(_Boost_CONTAINER_HEADERS "boost/container/container_fwd.hpp") + set(_Boost_CONTEXT_HEADERS "boost/context/all.hpp") + set(_Boost_COROUTINE_HEADERS "boost/coroutine/all.hpp") + set(_Boost_EXCEPTION_HEADERS "boost/exception/exception.hpp") + set(_Boost_DATE_TIME_HEADERS "boost/date_time/date.hpp") + set(_Boost_FIBER_HEADERS "boost/fiber/all.hpp") + set(_Boost_FILESYSTEM_HEADERS "boost/filesystem/path.hpp") + set(_Boost_GRAPH_HEADERS "boost/graph/adjacency_list.hpp") + set(_Boost_GRAPH_PARALLEL_HEADERS "boost/graph/adjacency_list.hpp") + set(_Boost_IOSTREAMS_HEADERS "boost/iostreams/stream.hpp") + set(_Boost_LOCALE_HEADERS "boost/locale.hpp") + set(_Boost_LOG_HEADERS "boost/log/core.hpp") + set(_Boost_LOG_SETUP_HEADERS "boost/log/detail/setup_config.hpp") + set(_Boost_MATH_HEADERS "boost/math_fwd.hpp") + set(_Boost_MATH_C99_HEADERS "boost/math/tr1.hpp") + set(_Boost_MATH_C99F_HEADERS "boost/math/tr1.hpp") + set(_Boost_MATH_C99L_HEADERS "boost/math/tr1.hpp") + set(_Boost_MATH_TR1_HEADERS "boost/math/tr1.hpp") + set(_Boost_MATH_TR1F_HEADERS "boost/math/tr1.hpp") + set(_Boost_MATH_TR1L_HEADERS "boost/math/tr1.hpp") + set(_Boost_MPI_HEADERS "boost/mpi.hpp") + set(_Boost_MPI_PYTHON_HEADERS "boost/mpi/python/config.hpp") + set(_Boost_PRG_EXEC_MONITOR_HEADERS "boost/test/prg_exec_monitor.hpp") + set(_Boost_PROGRAM_OPTIONS_HEADERS "boost/program_options.hpp") + set(_Boost_PYTHON_HEADERS "boost/python.hpp") + set(_Boost_RANDOM_HEADERS "boost/random.hpp") + set(_Boost_REGEX_HEADERS "boost/regex.hpp") + set(_Boost_SERIALIZATION_HEADERS "boost/serialization/serialization.hpp") + set(_Boost_SIGNALS_HEADERS "boost/signals.hpp") + set(_Boost_SYSTEM_HEADERS "boost/system/config.hpp") + set(_Boost_TEST_EXEC_MONITOR_HEADERS 
"boost/test/test_exec_monitor.hpp") + set(_Boost_THREAD_HEADERS "boost/thread.hpp") + set(_Boost_TIMER_HEADERS "boost/timer.hpp") + set(_Boost_TYPE_ERASURE_HEADERS "boost/type_erasure/config.hpp") + set(_Boost_UNIT_TEST_FRAMEWORK_HEADERS "boost/test/framework.hpp") + set(_Boost_WAVE_HEADERS "boost/wave.hpp") + set(_Boost_WSERIALIZATION_HEADERS "boost/archive/text_wiarchive.hpp") + if(WIN32) + set(_Boost_BZIP2_HEADERS "boost/iostreams/filter/bzip2.hpp") + set(_Boost_ZLIB_HEADERS "boost/iostreams/filter/zlib.hpp") + endif() + + string(TOUPPER ${component} uppercomponent) + set(${_hdrs} ${_Boost_${uppercomponent}_HEADERS} PARENT_SCOPE) + + string(REGEX REPLACE ";" " " _boost_HDRS_STRING "${_Boost_${uppercomponent}_HEADERS}") + if (NOT _boost_HDRS_STRING) + set(_boost_HDRS_STRING "(none)") + endif() + # message(STATUS "Headers for Boost::${component}: ${_boost_HDRS_STRING}") +endfunction() + +# +# Determine if any missing dependencies require adding to the component list. +# +# Sets _Boost_${COMPONENT}_DEPENDENCIES for each required component, +# plus _Boost_IMPORTED_TARGETS (TRUE if imported targets should be +# defined; FALSE if dependency information is unavailable). +# +# componentvar - the component list variable name +# extravar - the indirect dependency list variable name +# +# +function(_Boost_MISSING_DEPENDENCIES componentvar extravar) + # _boost_unprocessed_components - list of components requiring processing + # _boost_processed_components - components already processed (or currently being processed) + # _boost_new_components - new components discovered for future processing + # + list(APPEND _boost_unprocessed_components ${${componentvar}}) + + while(_boost_unprocessed_components) + list(APPEND _boost_processed_components ${_boost_unprocessed_components}) + foreach(component ${_boost_unprocessed_components}) + string(TOUPPER ${component} uppercomponent) + set(${_ret} ${_Boost_${uppercomponent}_DEPENDENCIES} PARENT_SCOPE) + _Boost_COMPONENT_DEPENDENCIES("${component}" _Boost_${uppercomponent}_DEPENDENCIES) + set(_Boost_${uppercomponent}_DEPENDENCIES ${_Boost_${uppercomponent}_DEPENDENCIES} PARENT_SCOPE) + set(_Boost_IMPORTED_TARGETS ${_Boost_IMPORTED_TARGETS} PARENT_SCOPE) + foreach(componentdep ${_Boost_${uppercomponent}_DEPENDENCIES}) + list(FIND _boost_processed_components "${componentdep}" _boost_component_found) + list(FIND _boost_new_components "${componentdep}" _boost_component_new) + if (_boost_component_found EQUAL -1 AND _boost_component_new EQUAL -1) + list(APPEND _boost_new_components ${componentdep}) + endif() + endforeach() + endforeach() + set(_boost_unprocessed_components ${_boost_new_components}) + unset(_boost_new_components) + endwhile() + set(_boost_extra_components ${_boost_processed_components}) + if(_boost_extra_components AND ${componentvar}) + list(REMOVE_ITEM _boost_extra_components ${${componentvar}}) + endif() + set(${componentvar} ${_boost_processed_components} PARENT_SCOPE) + set(${extravar} ${_boost_extra_components} PARENT_SCOPE) +endfunction() + +# +# Update library search directory hint variable with paths used by prebuilt boost binaries. +# +# Prebuilt windows binaries (https://sourceforge.net/projects/boost/files/boost-binaries/) +# have library directories named using MSVC compiler version and architecture. +# This function would append corresponding directories if MSVC is a current compiler, +# so having `BOOST_ROOT` would be enough to specify to find everything. 
+# +macro(_Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS componentlibvar basedir) + if("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xMSVC") + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(_arch_suffix 64) + else() + set(_arch_suffix 32) + endif() + if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.10) + list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-14.1) + list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-14.0) + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19) + list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-14.0) + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 18) + list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-12.0) + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 17) + list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-11.0) + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16) + list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-10.0) + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 15) + list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-9.0) + elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14) + list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-8.0) + endif() + endif() +endmacro() + +# +# End functions/macros +# +#------------------------------------------------------------------------------- + +#------------------------------------------------------------------------------- +# main. +#------------------------------------------------------------------------------- + + +# If the user sets Boost_LIBRARY_DIR, use it as the default for both +# configurations. +if(NOT Boost_LIBRARY_DIR_RELEASE AND Boost_LIBRARY_DIR) + set(Boost_LIBRARY_DIR_RELEASE "${Boost_LIBRARY_DIR}") +endif() +if(NOT Boost_LIBRARY_DIR_DEBUG AND Boost_LIBRARY_DIR) + set(Boost_LIBRARY_DIR_DEBUG "${Boost_LIBRARY_DIR}") +endif() + +if(NOT DEFINED Boost_USE_MULTITHREADED) + set(Boost_USE_MULTITHREADED TRUE) +endif() +if(NOT DEFINED Boost_USE_DEBUG_RUNTIME) + set(Boost_USE_DEBUG_RUNTIME TRUE) +endif() + +# Check the version of Boost against the requested version. +if(Boost_FIND_VERSION AND NOT Boost_FIND_VERSION_MINOR) + message(SEND_ERROR "When requesting a specific version of Boost, you must provide at least the major and minor version numbers, e.g., 1.34") +endif() + +if(Boost_FIND_VERSION_EXACT) + # The version may appear in a directory with or without the patch + # level, even when the patch level is non-zero. + set(_boost_TEST_VERSIONS + "${Boost_FIND_VERSION_MAJOR}.${Boost_FIND_VERSION_MINOR}.${Boost_FIND_VERSION_PATCH}" + "${Boost_FIND_VERSION_MAJOR}.${Boost_FIND_VERSION_MINOR}") +else() + # The user has not requested an exact version. Among known + # versions, find those that are acceptable to the user request. + # + # Note: When adding a new Boost release, also update the dependency + # information in _Boost_COMPONENT_DEPENDENCIES and + # _Boost_COMPONENT_HEADERS. See the instructions at the top of + # _Boost_COMPONENT_DEPENDENCIES. 
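+  #
+  # As a sketch (the 1.65 numbers are hypothetical and only illustrate the
+  # mechanism), a project needing a release newer than those listed here can
+  # make it acceptable by setting, before calling find_package(Boost):
+  #
+  #   set(Boost_ADDITIONAL_VERSIONS "1.65.0" "1.65")
+  #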
+ set(_Boost_KNOWN_VERSIONS ${Boost_ADDITIONAL_VERSIONS} + "1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60" + "1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55" + "1.54.0" "1.54" "1.53.0" "1.53" "1.52.0" "1.52" "1.51.0" "1.51" + "1.50.0" "1.50" "1.49.0" "1.49" "1.48.0" "1.48" "1.47.0" "1.47" "1.46.1" + "1.46.0" "1.46" "1.45.0" "1.45" "1.44.0" "1.44" "1.43.0" "1.43" "1.42.0" "1.42" + "1.41.0" "1.41" "1.40.0" "1.40" "1.39.0" "1.39" "1.38.0" "1.38" "1.37.0" "1.37" + "1.36.1" "1.36.0" "1.36" "1.35.1" "1.35.0" "1.35" "1.34.1" "1.34.0" + "1.34" "1.33.1" "1.33.0" "1.33") + + set(_boost_TEST_VERSIONS) + if(Boost_FIND_VERSION) + set(_Boost_FIND_VERSION_SHORT "${Boost_FIND_VERSION_MAJOR}.${Boost_FIND_VERSION_MINOR}") + # Select acceptable versions. + foreach(version ${_Boost_KNOWN_VERSIONS}) + if(NOT "${version}" VERSION_LESS "${Boost_FIND_VERSION}") + # This version is high enough. + list(APPEND _boost_TEST_VERSIONS "${version}") + elseif("${version}.99" VERSION_EQUAL "${_Boost_FIND_VERSION_SHORT}.99") + # This version is a short-form for the requested version with + # the patch level dropped. + list(APPEND _boost_TEST_VERSIONS "${version}") + endif() + endforeach() + else() + # Any version is acceptable. + set(_boost_TEST_VERSIONS "${_Boost_KNOWN_VERSIONS}") + endif() +endif() + +# The reason that we failed to find Boost. This will be set to a +# user-friendly message when we fail to find some necessary piece of +# Boost. +set(Boost_ERROR_REASON) + +if(Boost_DEBUG) + # Output some of their choices + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "_boost_TEST_VERSIONS = ${_boost_TEST_VERSIONS}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Boost_USE_MULTITHREADED = ${Boost_USE_MULTITHREADED}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Boost_USE_STATIC_LIBS = ${Boost_USE_STATIC_LIBS}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Boost_USE_STATIC_RUNTIME = ${Boost_USE_STATIC_RUNTIME}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Boost_ADDITIONAL_VERSIONS = ${Boost_ADDITIONAL_VERSIONS}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Boost_NO_SYSTEM_PATHS = ${Boost_NO_SYSTEM_PATHS}") +endif() + +# Supply Boost_LIB_DIAGNOSTIC_DEFINITIONS as a convenience target. It +# will only contain any interface definitions on WIN32, but is created +# on all platforms to keep end user code free from platform dependent +# code. Also provide convenience targets to disable autolinking and +# enable dynamic linking. +if(NOT TARGET Boost::diagnostic_definitions) + add_library(Boost::diagnostic_definitions INTERFACE IMPORTED) + add_library(Boost::disable_autolinking INTERFACE IMPORTED) + add_library(Boost::dynamic_linking INTERFACE IMPORTED) +endif() +if(WIN32) + # In windows, automatic linking is performed, so you do not have + # to specify the libraries. If you are linking to a dynamic + # runtime, then you can choose to link to either a static or a + # dynamic Boost library, the default is to do a static link. You + # can alter this for a specific library "whatever" by defining + # BOOST_WHATEVER_DYN_LINK to force Boost library "whatever" to be + # linked dynamically. Alternatively you can force all Boost + # libraries to dynamic link by defining BOOST_ALL_DYN_LINK. 
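+
+  # For example (the target name "foo" is illustrative), a consumer can opt in
+  # to these definitions through the convenience targets created above, whose
+  # compile definitions are attached just below:
+  #
+  #   target_link_libraries(foo Boost::dynamic_linking)     # adds BOOST_ALL_DYN_LINK
+  #   target_link_libraries(foo Boost::disable_autolinking) # adds BOOST_ALL_NO_LIB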
+ + # This feature can be disabled for Boost library "whatever" by + # defining BOOST_WHATEVER_NO_LIB, or for all of Boost by defining + # BOOST_ALL_NO_LIB. + + # If you want to observe which libraries are being linked against + # then defining BOOST_LIB_DIAGNOSTIC will cause the auto-linking + # code to emit a #pragma message each time a library is selected + # for linking. + set(Boost_LIB_DIAGNOSTIC_DEFINITIONS "-DBOOST_LIB_DIAGNOSTIC") + set_target_properties(Boost::diagnostic_definitions PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "BOOST_LIB_DIAGNOSTIC") + set_target_properties(Boost::disable_autolinking PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "BOOST_ALL_NO_LIB") + set_target_properties(Boost::dynamic_linking PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "BOOST_ALL_DYN_LINK") +endif() + +_Boost_CHECK_SPELLING(Boost_ROOT) +_Boost_CHECK_SPELLING(Boost_LIBRARYDIR) +_Boost_CHECK_SPELLING(Boost_INCLUDEDIR) + +# Collect environment variable inputs as hints. Do not consider changes. +foreach(v BOOSTROOT BOOST_ROOT BOOST_INCLUDEDIR BOOST_LIBRARYDIR) + set(_env $ENV{${v}}) + if(_env) + file(TO_CMAKE_PATH "${_env}" _ENV_${v}) + else() + set(_ENV_${v} "") + endif() +endforeach() +if(NOT _ENV_BOOST_ROOT AND _ENV_BOOSTROOT) + set(_ENV_BOOST_ROOT "${_ENV_BOOSTROOT}") +endif() + +# Collect inputs and cached results. Detect changes since the last run. +if(NOT BOOST_ROOT AND BOOSTROOT) + set(BOOST_ROOT "${BOOSTROOT}") +endif() +set(_Boost_VARS_DIR + BOOST_ROOT + Boost_NO_SYSTEM_PATHS + ) + +if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Declared as CMake or Environmental Variables:") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " BOOST_ROOT = ${BOOST_ROOT}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " BOOST_INCLUDEDIR = ${BOOST_INCLUDEDIR}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " BOOST_LIBRARYDIR = ${BOOST_LIBRARYDIR}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "_boost_TEST_VERSIONS = ${_boost_TEST_VERSIONS}") +endif() + +# ------------------------------------------------------------------------ +# Search for Boost include DIR +# ------------------------------------------------------------------------ + +set(_Boost_VARS_INC BOOST_INCLUDEDIR Boost_INCLUDE_DIR Boost_ADDITIONAL_VERSIONS) +_Boost_CHANGE_DETECT(_Boost_CHANGE_INCDIR ${_Boost_VARS_DIR} ${_Boost_VARS_INC}) +# Clear Boost_INCLUDE_DIR if it did not change but other input affecting the +# location did. We will find a new one based on the new inputs. +if(_Boost_CHANGE_INCDIR AND NOT _Boost_INCLUDE_DIR_CHANGED) + unset(Boost_INCLUDE_DIR CACHE) +endif() + +if(NOT Boost_INCLUDE_DIR) + set(_boost_INCLUDE_SEARCH_DIRS "") + if(BOOST_INCLUDEDIR) + list(APPEND _boost_INCLUDE_SEARCH_DIRS ${BOOST_INCLUDEDIR}) + elseif(_ENV_BOOST_INCLUDEDIR) + list(APPEND _boost_INCLUDE_SEARCH_DIRS ${_ENV_BOOST_INCLUDEDIR}) + endif() + + if( BOOST_ROOT ) + list(APPEND _boost_INCLUDE_SEARCH_DIRS ${BOOST_ROOT}/include ${BOOST_ROOT}) + elseif( _ENV_BOOST_ROOT ) + list(APPEND _boost_INCLUDE_SEARCH_DIRS ${_ENV_BOOST_ROOT}/include ${_ENV_BOOST_ROOT}) + endif() + + if( Boost_NO_SYSTEM_PATHS) + list(APPEND _boost_INCLUDE_SEARCH_DIRS NO_CMAKE_SYSTEM_PATH NO_SYSTEM_ENVIRONMENT_PATH) + else() + if("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xMSVC") + foreach(ver ${_Boost_KNOWN_VERSIONS}) + string(REPLACE "." 
"_" ver "${ver}") + list(APPEND _boost_INCLUDE_SEARCH_DIRS PATHS "C:/local/boost_${ver}") + endforeach() + endif() + list(APPEND _boost_INCLUDE_SEARCH_DIRS PATHS + C:/boost/include + C:/boost + /sw/local/include + ) + endif() + + # Try to find Boost by stepping backwards through the Boost versions + # we know about. + # Build a list of path suffixes for each version. + set(_boost_PATH_SUFFIXES) + foreach(_boost_VER ${_boost_TEST_VERSIONS}) + # Add in a path suffix, based on the required version, ideally + # we could read this from version.hpp, but for that to work we'd + # need to know the include dir already + set(_boost_BOOSTIFIED_VERSION) + + # Transform 1.35 => 1_35 and 1.36.0 => 1_36_0 + if(_boost_VER MATCHES "([0-9]+)\\.([0-9]+)\\.([0-9]+)") + set(_boost_BOOSTIFIED_VERSION + "${CMAKE_MATCH_1}_${CMAKE_MATCH_2}_${CMAKE_MATCH_3}") + elseif(_boost_VER MATCHES "([0-9]+)\\.([0-9]+)") + set(_boost_BOOSTIFIED_VERSION + "${CMAKE_MATCH_1}_${CMAKE_MATCH_2}") + endif() + + list(APPEND _boost_PATH_SUFFIXES + "boost-${_boost_BOOSTIFIED_VERSION}" + "boost_${_boost_BOOSTIFIED_VERSION}" + "boost/boost-${_boost_BOOSTIFIED_VERSION}" + "boost/boost_${_boost_BOOSTIFIED_VERSION}" + ) + + endforeach() + + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Include debugging info:") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " _boost_INCLUDE_SEARCH_DIRS = ${_boost_INCLUDE_SEARCH_DIRS}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " _boost_PATH_SUFFIXES = ${_boost_PATH_SUFFIXES}") + endif() + + # Look for a standard boost header file. + find_path(Boost_INCLUDE_DIR + NAMES boost/config.hpp + HINTS ${_boost_INCLUDE_SEARCH_DIRS} + PATH_SUFFIXES ${_boost_PATH_SUFFIXES} + ) +endif() + +# ------------------------------------------------------------------------ +# Extract version information from version.hpp +# ------------------------------------------------------------------------ + +# Set Boost_FOUND based only on header location and version. +# It will be updated below for component libraries. +if(Boost_INCLUDE_DIR) + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "location of version.hpp: ${Boost_INCLUDE_DIR}/boost/version.hpp") + endif() + + # Extract Boost_VERSION and Boost_LIB_VERSION from version.hpp + set(Boost_VERSION 0) + set(Boost_LIB_VERSION "") + file(STRINGS "${Boost_INCLUDE_DIR}/boost/version.hpp" _boost_VERSION_HPP_CONTENTS REGEX "#define BOOST_(LIB_)?VERSION ") + set(_Boost_VERSION_REGEX "([0-9]+)") + set(_Boost_LIB_VERSION_REGEX "\"([0-9_]+)\"") + foreach(v VERSION LIB_VERSION) + if("${_boost_VERSION_HPP_CONTENTS}" MATCHES "#define BOOST_${v} ${_Boost_${v}_REGEX}") + set(Boost_${v} "${CMAKE_MATCH_1}") + endif() + endforeach() + unset(_boost_VERSION_HPP_CONTENTS) + + math(EXPR Boost_MAJOR_VERSION "${Boost_VERSION} / 100000") + math(EXPR Boost_MINOR_VERSION "${Boost_VERSION} / 100 % 1000") + math(EXPR Boost_SUBMINOR_VERSION "${Boost_VERSION} % 100") + + string(APPEND Boost_ERROR_REASON + "Boost version: ${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION}.${Boost_SUBMINOR_VERSION}\nBoost include path: ${Boost_INCLUDE_DIR}") + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "version.hpp reveals boost " + "${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION}.${Boost_SUBMINOR_VERSION}") + endif() + + if(Boost_FIND_VERSION) + # Set Boost_FOUND based on requested version. 
+ set(_Boost_VERSION "${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION}.${Boost_SUBMINOR_VERSION}") + if("${_Boost_VERSION}" VERSION_LESS "${Boost_FIND_VERSION}") + set(Boost_FOUND 0) + set(_Boost_VERSION_AGE "old") + elseif(Boost_FIND_VERSION_EXACT AND + NOT "${_Boost_VERSION}" VERSION_EQUAL "${Boost_FIND_VERSION}") + set(Boost_FOUND 0) + set(_Boost_VERSION_AGE "new") + else() + set(Boost_FOUND 1) + endif() + if(NOT Boost_FOUND) + # State that we found a version of Boost that is too new or too old. + string(APPEND Boost_ERROR_REASON + "\nDetected version of Boost is too ${_Boost_VERSION_AGE}. Requested version was ${Boost_FIND_VERSION_MAJOR}.${Boost_FIND_VERSION_MINOR}") + if (Boost_FIND_VERSION_PATCH) + string(APPEND Boost_ERROR_REASON + ".${Boost_FIND_VERSION_PATCH}") + endif () + if (NOT Boost_FIND_VERSION_EXACT) + string(APPEND Boost_ERROR_REASON " (or newer)") + endif () + string(APPEND Boost_ERROR_REASON ".") + endif () + else() + # Caller will accept any Boost version. + set(Boost_FOUND 1) + endif() +else() + set(Boost_FOUND 0) + string(APPEND Boost_ERROR_REASON + "Unable to find the Boost header files. Please set BOOST_ROOT to the root directory containing Boost or BOOST_INCLUDEDIR to the directory containing Boost's headers.") +endif() + +# ------------------------------------------------------------------------ +# Prefix initialization +# ------------------------------------------------------------------------ + +set(Boost_LIB_PREFIX "") +if ( (GHSMULTI AND Boost_USE_STATIC_LIBS) OR + (WIN32 AND Boost_USE_STATIC_LIBS AND NOT CYGWIN) ) + set(Boost_LIB_PREFIX "lib") +endif() + +if ( NOT Boost_NAMESPACE ) + set(Boost_NAMESPACE "boost") +endif() + +# ------------------------------------------------------------------------ +# Suffix initialization and compiler suffix detection. +# ------------------------------------------------------------------------ + +set(_Boost_VARS_NAME + Boost_NAMESPACE + Boost_COMPILER + Boost_THREADAPI + Boost_USE_DEBUG_PYTHON + Boost_USE_MULTITHREADED + Boost_USE_STATIC_LIBS + Boost_USE_STATIC_RUNTIME + Boost_USE_STLPORT + Boost_USE_STLPORT_DEPRECATED_NATIVE_IOSTREAMS + ) +_Boost_CHANGE_DETECT(_Boost_CHANGE_LIBNAME ${_Boost_VARS_NAME}) + +# Setting some more suffixes for the library +if (Boost_COMPILER) + set(_boost_COMPILER ${Boost_COMPILER}) + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "using user-specified Boost_COMPILER = ${_boost_COMPILER}") + endif() +else() + # Attempt to guess the compiler suffix + # NOTE: this is not perfect yet, if you experience any issues + # please report them and use the Boost_COMPILER variable + # to work around the problems. + _Boost_GUESS_COMPILER_PREFIX(_boost_COMPILER) + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "guessed _boost_COMPILER = ${_boost_COMPILER}") + endif() +endif() + +set (_boost_MULTITHREADED "-mt") +if( NOT Boost_USE_MULTITHREADED ) + set (_boost_MULTITHREADED "") +endif() +if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "_boost_MULTITHREADED = ${_boost_MULTITHREADED}") +endif() + +#====================== +# Systematically build up the Boost ABI tag +# http://boost.org/doc/libs/1_41_0/more/getting_started/windows.html#library-naming +set( _boost_RELEASE_ABI_TAG "-") +set( _boost_DEBUG_ABI_TAG "-") +# Key Use this library when: +# s linking statically to the C++ standard library and +# compiler runtime support libraries. 
+if(Boost_USE_STATIC_RUNTIME) + set( _boost_RELEASE_ABI_TAG "${_boost_RELEASE_ABI_TAG}s") + set( _boost_DEBUG_ABI_TAG "${_boost_DEBUG_ABI_TAG}s") +endif() +# g using debug versions of the standard and runtime +# support libraries +if(WIN32 AND Boost_USE_DEBUG_RUNTIME) + if("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xMSVC" + OR "${CMAKE_CXX_COMPILER}" MATCHES "icl" + OR "${CMAKE_CXX_COMPILER}" MATCHES "icpc") + string(APPEND _boost_DEBUG_ABI_TAG "g") + endif() +endif() +# y using special debug build of python +if(Boost_USE_DEBUG_PYTHON) + string(APPEND _boost_DEBUG_ABI_TAG "y") +endif() +# d using a debug version of your code +string(APPEND _boost_DEBUG_ABI_TAG "d") +# p using the STLport standard library rather than the +# default one supplied with your compiler +if(Boost_USE_STLPORT) + string(APPEND _boost_RELEASE_ABI_TAG "p") + string(APPEND _boost_DEBUG_ABI_TAG "p") +endif() +# n using the STLport deprecated "native iostreams" feature +if(Boost_USE_STLPORT_DEPRECATED_NATIVE_IOSTREAMS) + string(APPEND _boost_RELEASE_ABI_TAG "n") + string(APPEND _boost_DEBUG_ABI_TAG "n") +endif() + +if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "_boost_RELEASE_ABI_TAG = ${_boost_RELEASE_ABI_TAG}") + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "_boost_DEBUG_ABI_TAG = ${_boost_DEBUG_ABI_TAG}") +endif() + +# ------------------------------------------------------------------------ +# Begin finding boost libraries +# ------------------------------------------------------------------------ + +set(_Boost_VARS_LIB "") +foreach(c DEBUG RELEASE) + set(_Boost_VARS_LIB_${c} BOOST_LIBRARYDIR Boost_LIBRARY_DIR_${c}) + list(APPEND _Boost_VARS_LIB ${_Boost_VARS_LIB_${c}}) + _Boost_CHANGE_DETECT(_Boost_CHANGE_LIBDIR_${c} ${_Boost_VARS_DIR} ${_Boost_VARS_LIB_${c}} Boost_INCLUDE_DIR) + # Clear Boost_LIBRARY_DIR_${c} if it did not change but other input affecting the + # location did. We will find a new one based on the new inputs. + if(_Boost_CHANGE_LIBDIR_${c} AND NOT _Boost_LIBRARY_DIR_${c}_CHANGED) + unset(Boost_LIBRARY_DIR_${c} CACHE) + endif() + + # If Boost_LIBRARY_DIR_[RELEASE,DEBUG] is set, prefer its value. + if(Boost_LIBRARY_DIR_${c}) + set(_boost_LIBRARY_SEARCH_DIRS_${c} ${Boost_LIBRARY_DIR_${c}} NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) + else() + set(_boost_LIBRARY_SEARCH_DIRS_${c} "") + if(BOOST_LIBRARYDIR) + list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} ${BOOST_LIBRARYDIR}) + elseif(_ENV_BOOST_LIBRARYDIR) + list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} ${_ENV_BOOST_LIBRARYDIR}) + endif() + + if(BOOST_ROOT) + list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} ${BOOST_ROOT}/lib ${BOOST_ROOT}/stage/lib) + _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "${BOOST_ROOT}") + elseif(_ENV_BOOST_ROOT) + list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} ${_ENV_BOOST_ROOT}/lib ${_ENV_BOOST_ROOT}/stage/lib) + _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "${_ENV_BOOST_ROOT}") + endif() + + list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} + ${Boost_INCLUDE_DIR}/lib + ${Boost_INCLUDE_DIR}/../lib + ${Boost_INCLUDE_DIR}/stage/lib + ) + _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "${Boost_INCLUDE_DIR}/..") + if( Boost_NO_SYSTEM_PATHS ) + list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} NO_CMAKE_SYSTEM_PATH NO_SYSTEM_ENVIRONMENT_PATH) + else() + foreach(ver ${_Boost_KNOWN_VERSIONS}) + string(REPLACE "." 
"_" ver "${ver}") + _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "C:/local/boost_${ver}") + endforeach() + _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "C:/boost") + list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} PATHS + C:/boost/lib + C:/boost + /sw/local/lib + ) + endif() + endif() +endforeach() + +if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "_boost_LIBRARY_SEARCH_DIRS_RELEASE = ${_boost_LIBRARY_SEARCH_DIRS_RELEASE}" + "_boost_LIBRARY_SEARCH_DIRS_DEBUG = ${_boost_LIBRARY_SEARCH_DIRS_DEBUG}") +endif() + +# Support preference of static libs by adjusting CMAKE_FIND_LIBRARY_SUFFIXES +if( Boost_USE_STATIC_LIBS ) + set( _boost_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) + if(WIN32) + list(INSERT CMAKE_FIND_LIBRARY_SUFFIXES 0 .lib .a) + else() + set(CMAKE_FIND_LIBRARY_SUFFIXES .a) + endif() +endif() + +# We want to use the tag inline below without risking double dashes +if(_boost_RELEASE_ABI_TAG) + if(${_boost_RELEASE_ABI_TAG} STREQUAL "-") + set(_boost_RELEASE_ABI_TAG "") + endif() +endif() +if(_boost_DEBUG_ABI_TAG) + if(${_boost_DEBUG_ABI_TAG} STREQUAL "-") + set(_boost_DEBUG_ABI_TAG "") + endif() +endif() + +# The previous behavior of FindBoost when Boost_USE_STATIC_LIBS was enabled +# on WIN32 was to: +# 1. Search for static libs compiled against a SHARED C++ standard runtime library (use if found) +# 2. Search for static libs compiled against a STATIC C++ standard runtime library (use if found) +# We maintain this behavior since changing it could break people's builds. +# To disable the ambiguous behavior, the user need only +# set Boost_USE_STATIC_RUNTIME either ON or OFF. +set(_boost_STATIC_RUNTIME_WORKAROUND false) +if(WIN32 AND Boost_USE_STATIC_LIBS) + if(NOT DEFINED Boost_USE_STATIC_RUNTIME) + set(_boost_STATIC_RUNTIME_WORKAROUND TRUE) + endif() +endif() + +# On versions < 1.35, remove the System library from the considered list +# since it wasn't added until 1.35. +if(Boost_VERSION AND Boost_FIND_COMPONENTS) + if(Boost_VERSION LESS 103500) + list(REMOVE_ITEM Boost_FIND_COMPONENTS system) + endif() +endif() + +# Additional components may be required via component dependencies. +# Add any missing components to the list. +_Boost_MISSING_DEPENDENCIES(Boost_FIND_COMPONENTS _Boost_EXTRA_FIND_COMPONENTS) + +# If thread is required, get the thread libs as a dependency +list(FIND Boost_FIND_COMPONENTS thread _Boost_THREAD_DEPENDENCY_LIBS) +if(NOT _Boost_THREAD_DEPENDENCY_LIBS EQUAL -1) + include(CMakeFindDependencyMacro) + find_dependency(Threads) +endif() + +# If the user changed any of our control inputs flush previous results. +if(_Boost_CHANGE_LIBDIR_DEBUG OR _Boost_CHANGE_LIBDIR_RELEASE OR _Boost_CHANGE_LIBNAME) + foreach(COMPONENT ${_Boost_COMPONENTS_SEARCHED}) + string(TOUPPER ${COMPONENT} UPPERCOMPONENT) + foreach(c DEBUG RELEASE) + set(_var Boost_${UPPERCOMPONENT}_LIBRARY_${c}) + unset(${_var} CACHE) + set(${_var} "${_var}-NOTFOUND") + endforeach() + endforeach() + set(_Boost_COMPONENTS_SEARCHED "") +endif() + +foreach(COMPONENT ${Boost_FIND_COMPONENTS}) + string(TOUPPER ${COMPONENT} UPPERCOMPONENT) + + set( _boost_docstring_release "Boost ${COMPONENT} library (release)") + set( _boost_docstring_debug "Boost ${COMPONENT} library (debug)") + + # Compute component-specific hints. 
+ set(_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT "") + if(${COMPONENT} STREQUAL "mpi" OR ${COMPONENT} STREQUAL "mpi_python" OR + ${COMPONENT} STREQUAL "graph_parallel") + foreach(lib ${MPI_CXX_LIBRARIES} ${MPI_C_LIBRARIES}) + if(IS_ABSOLUTE "${lib}") + get_filename_component(libdir "${lib}" PATH) + string(REPLACE "\\" "/" libdir "${libdir}") + list(APPEND _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT ${libdir}) + endif() + endforeach() + endif() + + # Consolidate and report component-specific hints. + if(_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT) + list(REMOVE_DUPLICATES _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT) + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Component-specific library search paths for ${COMPONENT}: " + "${_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT}") + endif() + endif() + + # + # Find headers + # + _Boost_COMPONENT_HEADERS("${COMPONENT}" Boost_${UPPERCOMPONENT}_HEADER_NAME) + # Look for a standard boost header file. + if(Boost_${UPPERCOMPONENT}_HEADER_NAME) + if(EXISTS "${Boost_INCLUDE_DIR}/${Boost_${UPPERCOMPONENT}_HEADER_NAME}") + set(Boost_${UPPERCOMPONENT}_HEADER ON) + else() + set(Boost_${UPPERCOMPONENT}_HEADER OFF) + endif() + else() + set(Boost_${UPPERCOMPONENT}_HEADER ON) + message(WARNING "No header defined for ${COMPONENT}; skipping header check") + endif() + + # + # Find RELEASE libraries + # + unset(_boost_RELEASE_NAMES) + foreach(compiler IN LISTS _boost_COMPILER) + list(APPEND _boost_RELEASE_NAMES + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG}-${Boost_LIB_VERSION} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG} ) + endforeach() + list(APPEND _boost_RELEASE_NAMES + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG}-${Boost_LIB_VERSION} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT} ) + if(_boost_STATIC_RUNTIME_WORKAROUND) + set(_boost_RELEASE_STATIC_ABI_TAG "-s${_boost_RELEASE_ABI_TAG}") + foreach(compiler IN LISTS _boost_COMPILER) + list(APPEND _boost_RELEASE_NAMES + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG}-${Boost_LIB_VERSION} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG} ) + endforeach() + list(APPEND _boost_RELEASE_NAMES + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG}-${Boost_LIB_VERSION} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG} ) + endif() + if(Boost_THREADAPI AND ${COMPONENT} STREQUAL "thread") + _Boost_PREPEND_LIST_WITH_THREADAPI(_boost_RELEASE_NAMES ${_boost_RELEASE_NAMES}) + endif() + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Searching for ${UPPERCOMPONENT}_LIBRARY_RELEASE: ${_boost_RELEASE_NAMES}") + endif() + + # if Boost_LIBRARY_DIR_RELEASE is not defined, + # but Boost_LIBRARY_DIR_DEBUG is, look there first for RELEASE libs + if(NOT Boost_LIBRARY_DIR_RELEASE AND Boost_LIBRARY_DIR_DEBUG) + list(INSERT _boost_LIBRARY_SEARCH_DIRS_RELEASE 0 ${Boost_LIBRARY_DIR_DEBUG}) + endif() + + # Avoid passing backslashes to _Boost_FIND_LIBRARY due to macro re-parsing. 
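The decorated candidate names assembled above follow Boost's library naming convention. A sketch of how one candidate expands, using purely illustrative tag values that this module does not set verbatim:

    # Illustration only, with hypothetical tag values:
    set(Boost_LIB_PREFIX "")            # becomes "lib" for static libraries on Windows
    set(Boost_NAMESPACE  "boost")
    set(_boost_MULTITHREADED "-mt")
    set(Boost_LIB_VERSION "1_64")
    # For COMPONENT=filesystem with a hypothetical "-gcc5" toolset tag, the release
    # candidates include boost_filesystem-gcc5-mt-1_64, boost_filesystem-mt-1_64,
    # boost_filesystem-mt and boost_filesystem; the debug candidates built below
    # additionally carry the debug ABI tag, for example "-d" or "-gd".
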
+ string(REPLACE "\\" "/" _boost_LIBRARY_SEARCH_DIRS_tmp "${_boost_LIBRARY_SEARCH_DIRS_RELEASE}") + + _Boost_FIND_LIBRARY(Boost_${UPPERCOMPONENT}_LIBRARY_RELEASE RELEASE + NAMES ${_boost_RELEASE_NAMES} + HINTS ${_boost_LIBRARY_SEARCH_DIRS_tmp} + NAMES_PER_DIR + DOC "${_boost_docstring_release}" + ) + + # + # Find DEBUG libraries + # + unset(_boost_DEBUG_NAMES) + foreach(compiler IN LISTS _boost_COMPILER) + list(APPEND _boost_DEBUG_NAMES + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG}-${Boost_LIB_VERSION} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG} ) + endforeach() + list(APPEND _boost_DEBUG_NAMES + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG}-${Boost_LIB_VERSION} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${_boost_MULTITHREADED} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT} ) + if(_boost_STATIC_RUNTIME_WORKAROUND) + set(_boost_DEBUG_STATIC_ABI_TAG "-s${_boost_DEBUG_ABI_TAG}") + foreach(compiler IN LISTS _boost_COMPILER) + list(APPEND _boost_DEBUG_NAMES + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG}-${Boost_LIB_VERSION} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG} ) + endforeach() + list(APPEND _boost_DEBUG_NAMES + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG}-${Boost_LIB_VERSION} + ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${COMPONENT}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG} ) + endif() + if(Boost_THREADAPI AND ${COMPONENT} STREQUAL "thread") + _Boost_PREPEND_LIST_WITH_THREADAPI(_boost_DEBUG_NAMES ${_boost_DEBUG_NAMES}) + endif() + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + "Searching for ${UPPERCOMPONENT}_LIBRARY_DEBUG: ${_boost_DEBUG_NAMES}") + endif() + + # if Boost_LIBRARY_DIR_DEBUG is not defined, + # but Boost_LIBRARY_DIR_RELEASE is, look there first for DEBUG libs + if(NOT Boost_LIBRARY_DIR_DEBUG AND Boost_LIBRARY_DIR_RELEASE) + list(INSERT _boost_LIBRARY_SEARCH_DIRS_DEBUG 0 ${Boost_LIBRARY_DIR_RELEASE}) + endif() + + # Avoid passing backslashes to _Boost_FIND_LIBRARY due to macro re-parsing. 
+ string(REPLACE "\\" "/" _boost_LIBRARY_SEARCH_DIRS_tmp "${_boost_LIBRARY_SEARCH_DIRS_DEBUG}") + + _Boost_FIND_LIBRARY(Boost_${UPPERCOMPONENT}_LIBRARY_DEBUG DEBUG + NAMES ${_boost_DEBUG_NAMES} + HINTS ${_boost_LIBRARY_SEARCH_DIRS_tmp} + NAMES_PER_DIR + DOC "${_boost_docstring_debug}" + ) + + if(Boost_REALPATH) + _Boost_SWAP_WITH_REALPATH(Boost_${UPPERCOMPONENT}_LIBRARY_RELEASE "${_boost_docstring_release}") + _Boost_SWAP_WITH_REALPATH(Boost_${UPPERCOMPONENT}_LIBRARY_DEBUG "${_boost_docstring_debug}" ) + endif() + + _Boost_ADJUST_LIB_VARS(${UPPERCOMPONENT}) + +endforeach() + +# Restore the original find library ordering +if( Boost_USE_STATIC_LIBS ) + set(CMAKE_FIND_LIBRARY_SUFFIXES ${_boost_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES}) +endif() + +# ------------------------------------------------------------------------ +# End finding boost libraries +# ------------------------------------------------------------------------ + +set(Boost_INCLUDE_DIRS ${Boost_INCLUDE_DIR}) +set(Boost_LIBRARY_DIRS) +if(Boost_LIBRARY_DIR_RELEASE) + list(APPEND Boost_LIBRARY_DIRS ${Boost_LIBRARY_DIR_RELEASE}) +endif() +if(Boost_LIBRARY_DIR_DEBUG) + list(APPEND Boost_LIBRARY_DIRS ${Boost_LIBRARY_DIR_DEBUG}) +endif() +if(Boost_LIBRARY_DIRS) + list(REMOVE_DUPLICATES Boost_LIBRARY_DIRS) +endif() + +# The above setting of Boost_FOUND was based only on the header files. +# Update it for the requested component libraries. +if(Boost_FOUND) + # The headers were found. Check for requested component libs. + set(_boost_CHECKED_COMPONENT FALSE) + set(_Boost_MISSING_COMPONENTS "") + foreach(COMPONENT ${Boost_FIND_COMPONENTS}) + string(TOUPPER ${COMPONENT} COMPONENT) + set(_boost_CHECKED_COMPONENT TRUE) + if(NOT Boost_${COMPONENT}_FOUND) + string(TOLOWER ${COMPONENT} COMPONENT) + list(APPEND _Boost_MISSING_COMPONENTS ${COMPONENT}) + endif() + endforeach() + if(_Boost_MISSING_COMPONENTS AND _Boost_EXTRA_FIND_COMPONENTS) + # Optional indirect dependencies are not counted as missing. + list(REMOVE_ITEM _Boost_MISSING_COMPONENTS ${_Boost_EXTRA_FIND_COMPONENTS}) + endif() + + if(Boost_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] Boost_FOUND = ${Boost_FOUND}") + endif() + + if (_Boost_MISSING_COMPONENTS) + set(Boost_FOUND 0) + # We were unable to find some libraries, so generate a sensible + # error message that lists the libraries we were unable to find. + string(APPEND Boost_ERROR_REASON + "\nCould not find the following") + if(Boost_USE_STATIC_LIBS) + string(APPEND Boost_ERROR_REASON " static") + endif() + string(APPEND Boost_ERROR_REASON + " Boost libraries:\n") + foreach(COMPONENT ${_Boost_MISSING_COMPONENTS}) + string(APPEND Boost_ERROR_REASON + " ${Boost_NAMESPACE}_${COMPONENT}\n") + endforeach() + + list(LENGTH Boost_FIND_COMPONENTS Boost_NUM_COMPONENTS_WANTED) + list(LENGTH _Boost_MISSING_COMPONENTS Boost_NUM_MISSING_COMPONENTS) + if (${Boost_NUM_COMPONENTS_WANTED} EQUAL ${Boost_NUM_MISSING_COMPONENTS}) + string(APPEND Boost_ERROR_REASON + "No Boost libraries were found. You may need to set BOOST_LIBRARYDIR to the directory containing Boost libraries or BOOST_ROOT to the location of Boost.") + else () + string(APPEND Boost_ERROR_REASON + "Some (but not all) of the required Boost libraries were found. You may need to install these additional Boost libraries. 
Alternatively, set BOOST_LIBRARYDIR to the directory containing Boost libraries or BOOST_ROOT to the location of Boost.") + endif () + endif () + + if( NOT Boost_LIBRARY_DIRS AND NOT _boost_CHECKED_COMPONENT ) + # Compatibility Code for backwards compatibility with CMake + # 2.4's FindBoost module. + + # Look for the boost library path. + # Note that the user may not have installed any libraries + # so it is quite possible the Boost_LIBRARY_DIRS may not exist. + set(_boost_LIB_DIR ${Boost_INCLUDE_DIR}) + + if("${_boost_LIB_DIR}" MATCHES "boost-[0-9]+") + get_filename_component(_boost_LIB_DIR ${_boost_LIB_DIR} PATH) + endif() + + if("${_boost_LIB_DIR}" MATCHES "/include$") + # Strip off the trailing "/include" in the path. + get_filename_component(_boost_LIB_DIR ${_boost_LIB_DIR} PATH) + endif() + + if(EXISTS "${_boost_LIB_DIR}/lib") + string(APPEND _boost_LIB_DIR /lib) + elseif(EXISTS "${_boost_LIB_DIR}/stage/lib") + string(APPEND _boost_LIB_DIR "/stage/lib") + else() + set(_boost_LIB_DIR "") + endif() + + if(_boost_LIB_DIR AND EXISTS "${_boost_LIB_DIR}") + set(Boost_LIBRARY_DIRS ${_boost_LIB_DIR}) + endif() + + endif() +else() + # Boost headers were not found so no components were found. + foreach(COMPONENT ${Boost_FIND_COMPONENTS}) + string(TOUPPER ${COMPONENT} UPPERCOMPONENT) + set(Boost_${UPPERCOMPONENT}_FOUND 0) + endforeach() +endif() + +# ------------------------------------------------------------------------ +# Add imported targets +# ------------------------------------------------------------------------ + +if(Boost_FOUND) + # For header-only libraries + if(NOT TARGET Boost::boost) + add_library(Boost::boost INTERFACE IMPORTED) + if(Boost_INCLUDE_DIRS) + set_target_properties(Boost::boost PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${Boost_INCLUDE_DIRS}") + endif() + endif() + + foreach(COMPONENT ${Boost_FIND_COMPONENTS}) + if(_Boost_IMPORTED_TARGETS AND NOT TARGET Boost::${COMPONENT}) + string(TOUPPER ${COMPONENT} UPPERCOMPONENT) + if(Boost_${UPPERCOMPONENT}_FOUND) + if(Boost_USE_STATIC_LIBS) + add_library(Boost::${COMPONENT} STATIC IMPORTED) + else() + # Even if Boost_USE_STATIC_LIBS is OFF, we might have static + # libraries as a result. 
+ add_library(Boost::${COMPONENT} UNKNOWN IMPORTED) + endif() + if(Boost_INCLUDE_DIRS) + set_target_properties(Boost::${COMPONENT} PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${Boost_INCLUDE_DIRS}") + endif() + if(EXISTS "${Boost_${UPPERCOMPONENT}_LIBRARY}") + set_target_properties(Boost::${COMPONENT} PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" + IMPORTED_LOCATION "${Boost_${UPPERCOMPONENT}_LIBRARY}") + endif() + if(EXISTS "${Boost_${UPPERCOMPONENT}_LIBRARY_RELEASE}") + set_property(TARGET Boost::${COMPONENT} APPEND PROPERTY + IMPORTED_CONFIGURATIONS RELEASE) + set_target_properties(Boost::${COMPONENT} PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${Boost_${UPPERCOMPONENT}_LIBRARY_RELEASE}") + endif() + if(EXISTS "${Boost_${UPPERCOMPONENT}_LIBRARY_DEBUG}") + set_property(TARGET Boost::${COMPONENT} APPEND PROPERTY + IMPORTED_CONFIGURATIONS DEBUG) + set_target_properties(Boost::${COMPONENT} PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_DEBUG "CXX" + IMPORTED_LOCATION_DEBUG "${Boost_${UPPERCOMPONENT}_LIBRARY_DEBUG}") + endif() + if(_Boost_${UPPERCOMPONENT}_DEPENDENCIES) + unset(_Boost_${UPPERCOMPONENT}_TARGET_DEPENDENCIES) + foreach(dep ${_Boost_${UPPERCOMPONENT}_DEPENDENCIES}) + list(APPEND _Boost_${UPPERCOMPONENT}_TARGET_DEPENDENCIES Boost::${dep}) + endforeach() + if(COMPONENT STREQUAL "thread") + list(APPEND _Boost_${UPPERCOMPONENT}_TARGET_DEPENDENCIES Threads::Threads) + endif() + set_target_properties(Boost::${COMPONENT} PROPERTIES + INTERFACE_LINK_LIBRARIES "${_Boost_${UPPERCOMPONENT}_TARGET_DEPENDENCIES}") + endif() + endif() + endif() + endforeach() +endif() + +# ------------------------------------------------------------------------ +# Notification to end user about what was found +# ------------------------------------------------------------------------ + +set(Boost_LIBRARIES "") +if(Boost_FOUND) + if(NOT Boost_FIND_QUIETLY) + message(STATUS "Boost version: ${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION}.${Boost_SUBMINOR_VERSION}") + if(Boost_FIND_COMPONENTS) + message(STATUS "Found the following Boost libraries:") + endif() + endif() + foreach( COMPONENT ${Boost_FIND_COMPONENTS} ) + string( TOUPPER ${COMPONENT} UPPERCOMPONENT ) + if( Boost_${UPPERCOMPONENT}_FOUND ) + if(NOT Boost_FIND_QUIETLY) + message (STATUS " ${COMPONENT}") + endif() + list(APPEND Boost_LIBRARIES ${Boost_${UPPERCOMPONENT}_LIBRARY}) + endif() + endforeach() +else() + if(Boost_FIND_REQUIRED) + message(SEND_ERROR "Unable to find the requested Boost libraries.\n${Boost_ERROR_REASON}") + else() + if(NOT Boost_FIND_QUIETLY) + # we opt not to automatically output Boost_ERROR_REASON here as + # it could be quite lengthy and somewhat imposing in its requests + # Since Boost is not always a required dependency we'll leave this + # up to the end-user. + if(Boost_DEBUG OR Boost_DETAILED_FAILURE_MSG) + message(STATUS "Could NOT find Boost\n${Boost_ERROR_REASON}") + else() + message(STATUS "Could NOT find Boost") + endif() + endif() + endif() +endif() + +# Configure display of cache entries in GUI. 
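With the imported targets and the aggregated legacy variables now defined, a consumer project can link Boost either way. A hedged sketch; the target and component names are hypothetical and not taken from this tree:

    find_package(Boost REQUIRED COMPONENTS unit_test_framework filesystem)
    add_executable(UnitTests UnitTests.cpp)
    # Preferred: imported targets carry include paths and inter-component dependencies.
    target_link_libraries(UnitTests PRIVATE Boost::unit_test_framework Boost::filesystem)
    # Legacy alternative using the variables populated above:
    #   target_include_directories(UnitTests PRIVATE ${Boost_INCLUDE_DIRS})
    #   target_link_libraries(UnitTests PRIVATE ${Boost_LIBRARIES})
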
+foreach(v BOOSTROOT BOOST_ROOT ${_Boost_VARS_INC} ${_Boost_VARS_LIB}) + get_property(_type CACHE ${v} PROPERTY TYPE) + if(_type) + set_property(CACHE ${v} PROPERTY ADVANCED 1) + if("x${_type}" STREQUAL "xUNINITIALIZED") + if("x${v}" STREQUAL "xBoost_ADDITIONAL_VERSIONS") + set_property(CACHE ${v} PROPERTY TYPE STRING) + else() + set_property(CACHE ${v} PROPERTY TYPE PATH) + endif() + endif() + endif() +endforeach() + +# Record last used values of input variables so we can +# detect on the next run if the user changed them. +foreach(v + ${_Boost_VARS_INC} ${_Boost_VARS_LIB} + ${_Boost_VARS_DIR} ${_Boost_VARS_NAME} + ) + if(DEFINED ${v}) + set(_${v}_LAST "${${v}}" CACHE INTERNAL "Last used ${v} value.") + else() + unset(_${v}_LAST CACHE) + endif() +endforeach() + +# Maintain a persistent list of components requested anywhere since +# the last flush. +set(_Boost_COMPONENTS_SEARCHED "${_Boost_COMPONENTS_SEARCHED}") +list(APPEND _Boost_COMPONENTS_SEARCHED ${Boost_FIND_COMPONENTS}) +list(REMOVE_DUPLICATES _Boost_COMPONENTS_SEARCHED) +list(SORT _Boost_COMPONENTS_SEARCHED) +set(_Boost_COMPONENTS_SEARCHED "${_Boost_COMPONENTS_SEARCHED}" + CACHE INTERNAL "Components requested for this build tree.") + diff --git a/docs/Doxyfile b/docs/Doxyfile new file mode 100644 index 0000000000..ac636f4136 --- /dev/null +++ b/docs/Doxyfile @@ -0,0 +1,2489 @@ +# Doxyfile 1.8.12 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "ArmNN" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = NotReleased + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. 
The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = docs/ + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. 
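Since OUTPUT_DIRECTORY above is the relative path docs/, doxygen is expected to run from the repository root. A hedged sketch of a convenience target that does so; the doxygen-docs target is an assumption for illustration only, not something this change adds:

    # Hypothetical helper, relying on CMake's stock FindDoxygen module.
    find_package(Doxygen)
    if(DOXYGEN_FOUND)
        add_custom_target(doxygen-docs
            COMMAND ${DOXYGEN_EXECUTABLE} docs/Doxyfile
            WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
            COMMENT "Generating ArmNN API documentation under docs/")
    endif()
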
+ +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. 
+ +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. 
+# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 0. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 0 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. 
+ +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. 
+ +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = NO + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = YES + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. 
+# The default value is: NO. + +HIDE_SCOPE_NAMES = YES + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. 
By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if <section_label> ... \endif and \cond <section_label> +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. 
You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). 
+ +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = include/ src/ tests/ docs/ + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, +# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f \ + *.for \ + *.tcl \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. 
The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = caffe tensorflow cl armcomputetensorutils + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. 
+ +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = YES + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = YES + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. 
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# generated with the -Duse-libclang=ON option for CMake.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS = -std=c++11
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output.
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g. the
+# setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color.
Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. 
Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. 
The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. 
When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes take effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client-side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output.
+# When enabled you may also need to install MathJax separately and configure
+# the path to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with JavaScript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses JavaScript and DHTML and
+# should work on any modern browser.
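For context on FORMULA_FONTSIZE, FORMULA_TRANSPARENT and the MathJax tags above: they only affect formulas written with doxygen's \f commands in documentation comments. A minimal sketch (the declaration and formula are illustrative, not taken from the ArmNN sources):

/// Computes the softmax of each row, \f$ y_i = e^{x_i} / \sum_j e^{x_j} \f$.
///
/// \f[
///   y_i = \frac{e^{x_i}}{\sum_{j} e^{x_j}}
/// \f]
void SoftmaxExample();   // hypothetical declaration carrying the formulas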
Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use <access key> + S +# (what the <access key> is depends on the OS and browser, but it is typically +# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down +# key> to jump into the search results window, the results can be navigated +# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel +# the search. The filter options can be selected when the cursor is inside the +# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys> +# to select a filter and <Enter> or <escape> to activate or cancel the filter +# option. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a web server instead of a web client using Javascript. There +# are two flavors of web server based searching depending on the EXTERNAL_SEARCH +# setting. When disabled, doxygen will generate a PHP script for searching and +# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing +# and searching needs to be provided by external tools. See the section +# "External Indexing and Searching" for details. +# The default value is: NO. +# This tag requires that the tag SEARCHENGINE is set to YES. + +SERVER_BASED_SEARCH = NO + +# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP +# script for searching. Instead the search results are written to an XML file +# which needs to be processed by an external indexer. Doxygen will invoke an +# external search engine pointed to by the SEARCHENGINE_URL option to obtain the +# search results. +# +# Doxygen ships with an example indexer (doxyindexer) and search engine +# (doxysearch.cgi) which are based on the open source search engine library +# Xapian (see: http://xapian.org/). +# +# See the section "External Indexing and Searching" for details. +# The default value is: NO. +# This tag requires that the tag SEARCHENGINE is set to YES. + +EXTERNAL_SEARCH = NO + +# The SEARCHENGINE_URL should point to a search engine hosted by a web server +# which will return the search results when EXTERNAL_SEARCH is enabled. +# +# Doxygen ships with an example indexer (doxyindexer) and search engine +# (doxysearch.cgi) which are based on the open source search engine library +# Xapian (see: http://xapian.org/). See the section "External Indexing and +# Searching" for details. +# This tag requires that the tag SEARCHENGINE is set to YES. + +SEARCHENGINE_URL = + +# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed +# search data is written to a file for indexing by an external tool. With the +# SEARCHDATA_FILE tag the name of this file can be specified. +# The default file is: searchdata.xml. +# This tag requires that the tag SEARCHENGINE is set to YES. + +SEARCHDATA_FILE = searchdata.xml + +# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the +# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. 
This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# the project to a relative location where the documentation can be found. The
+# format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify:
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header.
See +# section "Doxygen usage" for information on how to let doxygen write the +# default header to a separate file. +# +# Note: Only use a user-defined header if you know what you are doing! The +# following commands have a special meaning inside the header: $title, +# $datetime, $date, $doxygenversion, $projectname, $projectnumber, +# $projectbrief, $projectlogo. Doxygen will replace $title with the empty +# string, for the replacement values of the other commands the user is referred +# to HTML_HEADER. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_HEADER = + +# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the +# generated LaTeX document. The footer should contain everything after the last +# chapter. If it is left blank doxygen will generate a standard footer. See +# LATEX_HEADER for more information on how to generate a default footer and what +# special commands can be used inside the footer. +# +# Note: Only use a user-defined footer if you know what you are doing! +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_FOOTER = + +# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# LaTeX style sheets that are included after the standard style sheets created +# by doxygen. Using this option one can overrule certain style aspects. Doxygen +# will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_EXTRA_STYLESHEET = + +# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the LATEX_OUTPUT output +# directory. Note that the files will be copied as-is; there are no commands or +# markers available. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_EXTRA_FILES = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is +# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will +# contain links (just like the HTML output) instead of page references. This +# makes the output suitable for online browsing using a PDF viewer. +# The default value is: YES. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate +# the PDF file directly from the LaTeX files. Set this option to YES, to get a +# higher quality PDF documentation. +# The default value is: YES. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode +# command to the generated LaTeX files. This will instruct LaTeX to keep running +# if errors occur, instead of asking the user for help. This option is also used +# when generating formulas in HTML. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_BATCHMODE = NO + +# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the +# index chapters (such as File Index, Compound Index, etc.) in the output. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. 
+ +LATEX_HIDE_INDICES = NO + +# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source +# code with syntax highlighting in the LaTeX output. +# +# Note that which sources are shown also depends on other settings such as +# SOURCE_BROWSER. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_SOURCE_CODE = NO + +# The LATEX_BIB_STYLE tag can be used to specify the style to use for the +# bibliography, e.g. plainnat, or ieeetr. See +# http://en.wikipedia.org/wiki/BibTeX and \cite for more info. +# The default value is: plain. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_BIB_STYLE = plain + +# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated +# page will contain the date and time when the page was generated. Setting this +# to NO can help when comparing the output of multiple runs. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_TIMESTAMP = NO + +#--------------------------------------------------------------------------- +# Configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The +# RTF output is optimized for Word 97 and may not look too pretty with other RTF +# readers/editors. +# The default value is: NO. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: rtf. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF +# documents. This may be useful for small projects and may help to save some +# trees in general. +# The default value is: NO. +# This tag requires that the tag GENERATE_RTF is set to YES. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will +# contain hyperlink fields. The RTF file will contain links (just like the HTML +# output) instead of page references. This makes the output suitable for online +# browsing using Word or some other Word compatible readers that support those +# fields. +# +# Note: WordPad (write) and others do not support links. +# The default value is: NO. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's config +# file, i.e. a series of assignments. You only have to provide replacements, +# missing definitions are set to their default value. +# +# See also section "Doxygen usage" for information on how to generate the +# default style sheet that doxygen normally uses. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an RTF document. Syntax is +# similar to doxygen's config file. A template extensions file can be generated +# using doxygen -e rtf extensionFile. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_EXTENSIONS_FILE = + +# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code +# with syntax highlighting in the RTF output. +# +# Note that which sources are shown also depends on other settings such as +# SOURCE_BROWSER. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. It defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook. +# This tag requires that the tag GENERATE_DOCBOOK is set to YES. + +DOCBOOK_OUTPUT = docbook + +# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the +# program listings (including syntax highlighting and cross-referencing +# information) to the DOCBOOK output. Note that enabling this will significantly +# increase the size of the DOCBOOK output. +# The default value is: NO. +# This tag requires that the tag GENERATE_DOCBOOK is set to YES. + +DOCBOOK_PROGRAMLISTING = NO + +#--------------------------------------------------------------------------- +# Configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an +# AutoGen Definitions (see http://autogen.sf.net) file that captures the +# structure of the code including all documentation. Note that this feature is +# still experimental and incomplete at the moment. +# The default value is: NO. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# Configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module +# file that captures the structure of the code including all documentation. +# +# Note that this feature is still experimental and incomplete at the moment. +# The default value is: NO. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary +# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI +# output from the Perl module output. +# The default value is: NO. +# This tag requires that the tag GENERATE_PERLMOD is set to YES. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely +# formatted so it can be parsed by a human reader. This is useful if you want to +# understand what is going on. On the other hand, if this tag is set to NO, the +# size of the Perl module output will be much smaller and Perl will parse it +# just the same. +# The default value is: YES. +# This tag requires that the tag GENERATE_PERLMOD is set to YES. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file are +# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful +# so different doxyrules.make files included by the same Makefile don't +# overwrite each other's variables. +# This tag requires that the tag GENERATE_PERLMOD is set to YES. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all +# C-preprocessor directives found in the sources and include files. +# The default value is: YES. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names +# in the source code. If set to NO, only conditional compilation will be +# performed. Macro expansion can be done in a controlled way by setting +# EXPAND_ONLY_PREDEF to YES. +# The default value is: NO. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. 
+ +MACRO_EXPANSION = YES + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then +# the macro expansion is limited to the macros specified with the PREDEFINED and +# EXPAND_AS_DEFINED tags. +# The default value is: NO. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES, the include files in the +# INCLUDE_PATH will be searched if a #include is found. +# The default value is: YES. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by the +# preprocessor. +# This tag requires that the tag SEARCH_INCLUDES is set to YES. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will be +# used. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that are +# defined before the preprocessor is started (similar to the -D option of e.g. +# gcc). The argument of the tag is a list of macros of the form: name or +# name=definition (no spaces). If the definition and the "=" are omitted, "=1" +# is assumed. To prevent a macro definition from being undefined via #undef or +# recursively expanded use the := operator instead of the = operator. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +PREDEFINED = DOXYGEN_SHOULD_SKIP_THIS \ + ARMCOMPUTECL_ENABLED \ + ARMCOMPUTENEON_ENABLED + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this +# tag can be used to specify a list of macro names that should be expanded. The +# macro definition that is found in the sources will be used. Use the PREDEFINED +# tag if you want to use a different macro definition that overrules the +# definition found in the source code. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will +# remove all references to function-like macros that are alone on a line, have +# an all uppercase name, and do not end with a semicolon. Such function macros +# are typically used for boiler-plate code, and will confuse the parser if not +# removed. +# The default value is: YES. +# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES tag can be used to specify one or more tag files. For each tag +# file the location of the external documentation should be added. The format of +# a tag file without this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where loc1 and loc2 can be relative or absolute paths or URLs. See the +# section "Linking to external documentation" for more information about the use +# of tag files. 
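To make the PREDEFINED list above concrete: doxygen treats those macros as defined while preprocessing, so declarations guarded by the backend macros are still documented, while DOXYGEN_SHOULD_SKIP_THIS is the conventional guard for code that should compile but stay out of the docs. A schematic C++ sketch (not an actual ArmNN source file):

#ifdef ARMCOMPUTECL_ENABLED          // documented because the macro is listed in PREDEFINED
class ClWorkloadFactory;             // illustrative forward declaration
#endif

#ifndef DOXYGEN_SHOULD_SKIP_THIS     // doxygen sees the macro as defined and skips this block,
namespace detail                     // but the compiler (which does not define it) keeps it
{
    void InternalHelper();           // hypothetical helper, hidden from the generated docs
}
#endif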
+# Note: Each tag file must have a unique name (where the name does NOT include +# the path). If a tag file is not located in the directory in which doxygen is +# run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create a +# tag file that is based on the input files it reads. See section "Linking to +# external documentation" for more information about the usage of tag files. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES, all external class will be listed in +# the class index. If set to NO, only the inherited external classes will be +# listed. +# The default value is: NO. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will be +# listed. +# The default value is: YES. + +EXTERNAL_GROUPS = YES + +# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in +# the related pages index. If set to NO, only the current project's pages will +# be listed. +# The default value is: YES. + +EXTERNAL_PAGES = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of 'which perl'). +# The default file (with absolute path) is: /usr/bin/perl. + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram +# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to +# NO turns the diagrams off. Note that this option also works with HAVE_DOT +# disabled, but it is recommended to install and use dot, since it yields more +# powerful graphs. +# The default value is: YES. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see: +# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# You can include diagrams made with dia in doxygen documentation. Doxygen will +# then run dia to produce the diagram and insert it in the documentation. The +# DIA_PATH tag allows you to specify the directory where the dia binary resides. +# If left empty dia is assumed to be found in the default search path. + +DIA_PATH = + +# If set to YES the inheritance and collaboration graphs will hide inheritance +# and usage relations if the target is undocumented or is not a class. +# The default value is: YES. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz (see: +# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent +# Bell Labs. The other options in this section have no effect if this option is +# set to NO +# The default value is: NO. + +HAVE_DOT = NO + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed +# to run in parallel. When set to 0 doxygen will base this on the number of +# processors available in the system. 
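The \msc command mentioned for MSCGEN_PATH embeds an mscgen sequence chart directly in a documentation comment; it only renders when the mscgen tool is available. A minimal, hypothetical sketch (entity and message names invented for illustration):

/// \msc
///   Application, Runtime;
///   Application->Runtime [label="LoadNetwork()"];
///   Runtime->Application [label="NetworkId"];
/// \endmsc
void LoadNetworkExample();   // placeholder declaration carrying the chart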
You can set it explicitly to a value +# larger than 0 to get control over the balance between CPU load and processing +# speed. +# Minimum value: 0, maximum value: 32, default value: 0. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_NUM_THREADS = 0 + +# When you want a differently looking font in the dot files that doxygen +# generates you can specify the font name using DOT_FONTNAME. You need to make +# sure dot is able to find the font, which can be done by putting it in a +# standard location or by setting the DOTFONTPATH environment variable or by +# setting DOT_FONTPATH to the directory containing the font. +# The default value is: Helvetica. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of +# dot graphs. +# Minimum value: 4, maximum value: 24, default value: 10. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the default font as specified with +# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set +# the path where dot can find it using this tag. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_FONTPATH = + +# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for +# each documented class showing the direct and indirect inheritance relations. +# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a +# graph for each documented class showing the direct and indirect implementation +# dependencies (inheritance, containment, and class references variables) of the +# class with other documented classes. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for +# groups, showing the direct groups dependencies. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +UML_LOOK = NO + +# If the UML_LOOK tag is enabled, the fields and methods are shown inside the +# class node. If there are many fields or methods and many nodes the graph may +# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the +# number of items for each type to make the size more manageable. Set this to 0 +# for no limit. Note that the threshold may be exceeded by 50% before the limit +# is enforced. So when you set the threshold to 10, up to 15 fields may appear, +# but if the number exceeds 15, the total amount of fields shown is limited to +# 10. +# Minimum value: 0, maximum value: 100, default value: 10. +# This tag requires that the tag HAVE_DOT is set to YES. + +UML_LIMIT_NUM_FIELDS = 10 + +# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and +# collaboration graphs will show the relations between templates and their +# instances. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. 
+
+TEMPLATE_RELATIONS = YES
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
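CALL_GRAPH and CALLER_GRAPH stay at NO above; as the comments note, individual functions can still opt in with \callgraph and \callergraph. A hedged sketch with a hypothetical function name:

/// Optimizes the given network for the selected backends.
/// \callgraph
/// \callergraph
void OptimizeExample();   // illustrative only; the two commands request per-function graphs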
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make +# the SVG files visible. Older versions of IE do not have SVG support. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +INTERACTIVE_SVG = NO + +# The DOT_PATH tag can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the \dotfile +# command). +# This tag requires that the tag HAVE_DOT is set to YES. + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the \mscfile +# command). + +MSCFILE_DIRS = + +# The DIAFILE_DIRS tag can be used to specify one or more directories that +# contain dia files that are included in the documentation (see the \diafile +# command). + +DIAFILE_DIRS = + +# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the +# path where java can find the plantuml.jar file. If left blank, it is assumed +# PlantUML is not used or called during a preprocessing step. Doxygen will +# generate a warning when it encounters a \startuml command in this case and +# will not generate output for the diagram. + +PLANTUML_JAR_PATH = + +# When using plantuml, the specified paths are searched for files specified by +# the !include statement in a plantuml block. + +PLANTUML_INCLUDE_PATH = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes +# that will be shown in the graph. If the number of nodes in a graph becomes +# larger than this value, doxygen will truncate the graph, which is visualized +# by representing a node as a red box. Note that doxygen if the number of direct +# children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that +# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. +# Minimum value: 0, maximum value: 10000, default value: 50. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_GRAPH_MAX_NODES = 500 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs +# generated by dot. A depth value of 3 means that only nodes reachable from the +# root by following a path via at most 3 edges will be shown. Nodes that lay +# further from the root node will be omitted. Note that setting this option to 1 +# or 2 may greatly reduce the computation time needed for large code bases. Also +# note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. +# Minimum value: 0, maximum value: 1000, default value: 0. +# This tag requires that the tag HAVE_DOT is set to YES. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not seem +# to support this out of the box. +# +# Warning: Depending on the platform used, enabling this option may lead to +# badly anti-aliased labels on the edges of a graph (i.e. they become hard to +# read). +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. 
+ +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) support +# this, this feature is disabled by default. +# The default value is: NO. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_MULTI_TARGETS = NO + +# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page +# explaining the meaning of the various boxes and arrows in the dot generated +# graphs. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot +# files that are used to generate the various graphs. +# The default value is: YES. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_CLEANUP = YES diff --git a/include/armnn/ArmNN.hpp b/include/armnn/ArmNN.hpp new file mode 100644 index 0000000000..d1cb7a8488 --- /dev/null +++ b/include/armnn/ArmNN.hpp @@ -0,0 +1,16 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Descriptors.hpp" +#include "Exceptions.hpp" +#include "IRuntime.hpp" +#include "INetwork.hpp" +#include "LayerSupport.hpp" +#include "Tensor.hpp" +#include "Types.hpp" +#include "TypesUtils.hpp" +#include "Utils.hpp" +#include "Version.hpp" diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp new file mode 100644 index 0000000000..2595656c70 --- /dev/null +++ b/include/armnn/Descriptors.hpp @@ -0,0 +1,307 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include "DescriptorsFwd.hpp" + +#include <cstdint> +#include <initializer_list> + +#include "Tensor.hpp" +#include "Types.hpp" + +namespace armnn +{ + +struct ActivationDescriptor +{ + ActivationDescriptor() : m_Function(ActivationFunction::Sigmoid), m_A(0), m_B(0) {}; + + ActivationFunction m_Function; + float m_A; + float m_B; +}; + +struct PermuteDescriptor +{ + PermuteDescriptor() + : m_DimMappings{} + { + } + PermuteDescriptor(const PermutationVector& dimMappings) + : m_DimMappings(dimMappings) + { + } + + PermutationVector m_DimMappings; +}; + +struct SoftmaxDescriptor +{ + SoftmaxDescriptor() : m_Beta(1.0f) {}; + + float m_Beta; +}; + + +struct OriginsDescriptor +{ + OriginsDescriptor(); + OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4); + OriginsDescriptor(const OriginsDescriptor& other); + OriginsDescriptor(OriginsDescriptor&& other); + + ~OriginsDescriptor(); + + OriginsDescriptor& operator=(OriginsDescriptor rhs); + + Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value); + uint32_t GetNumViews() const; + uint32_t GetNumDimensions() const; + const uint32_t* GetViewOrigin(uint32_t idx) const; + void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering); + friend void swap(OriginsDescriptor& first, OriginsDescriptor& second); + +private: + uint32_t m_NumViews; + uint32_t m_NumDimensions; + uint32_t** m_ViewOrigins; +}; + +struct ViewsDescriptor +{ + ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4); + ViewsDescriptor(const ViewsDescriptor& other); + ViewsDescriptor(); + ViewsDescriptor(ViewsDescriptor&& other); + + ~ViewsDescriptor(); + + ViewsDescriptor& operator=(ViewsDescriptor rhs); + + Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value); + Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value); + + uint32_t GetNumViews() const; + uint32_t GetNumDimensions() const; + const uint32_t* GetViewOrigin(uint32_t idx) const; + const uint32_t* GetViewSizes(uint32_t idx) const; + + friend void swap(ViewsDescriptor& first, ViewsDescriptor& second); +private: + OriginsDescriptor m_Origins; + uint32_t** m_ViewSizes; +}; + +// Convenience template to create a OriginsDescriptor to use when creating a Merger layer for performing concatenation +// of a number of input tensors +template <typename TensorShapeIt> +OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, + unsigned int concatenationDimension) +{ + auto numInputs = std::distance(first, last); + + if (numInputs < 2) + { + throw InvalidArgumentException("Concatenation requires at least 2 inputs"); + } + + const auto& firstInputShape = *first; + + const unsigned int numDimensions = firstInputShape.GetNumDimensions(); + for (auto it = first + 1; it != last; ++it) + { + if (it->GetNumDimensions() != numDimensions) + { + throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions"); + } + } + + if (concatenationDimension >= numDimensions) + { + throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions."); + } + + for (auto it = first; it != last; ++it) + { + for (unsigned int d = 0; d < numDimensions; ++d) + { + const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]); + if (!dimSizeOk) + { + throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions " + " except the concatenation dimension"); + } + } + } + + 
OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions); + + uint32_t viewIndex = 0u; + uint32_t coordAlongConcatDim = 0u; + for (auto it = first; it != last; ++it) + { + const auto& inputShape = *it; + + for (unsigned int i = 0; i < concatenationDimension; ++i) + { + viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0); + } + + viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim); + unsigned int dimSize = inputShape[concatenationDimension]; + coordAlongConcatDim += dimSize; + + + for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i) + { + viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0); + } + + ++viewIndex; + } + + return viewsDescriptor; +} + +struct Pooling2dDescriptor +{ + Pooling2dDescriptor() + : m_PoolType(PoolingAlgorithm::Max) + , m_PadLeft(0) + , m_PadRight(0) + , m_PadTop(0) + , m_PadBottom(0) + , m_PoolWidth(0) + , m_PoolHeight(0) + , m_StrideX(0) + , m_StrideY(0) + , m_OutputShapeRounding(OutputShapeRounding::Floor) + , m_PaddingMethod(PaddingMethod::Exclude) + {}; + + PoolingAlgorithm m_PoolType; + uint32_t m_PadLeft; + uint32_t m_PadRight; + uint32_t m_PadTop; + uint32_t m_PadBottom; + uint32_t m_PoolWidth; + uint32_t m_PoolHeight; + uint32_t m_StrideX; + uint32_t m_StrideY; + OutputShapeRounding m_OutputShapeRounding; + PaddingMethod m_PaddingMethod; +}; + +struct FullyConnectedDescriptor +{ + FullyConnectedDescriptor() + : m_BiasEnabled(false) + , m_TransposeWeightMatrix(false) + {}; + + bool m_BiasEnabled; + bool m_TransposeWeightMatrix; +}; + +struct Convolution2dDescriptor +{ + Convolution2dDescriptor() + : m_PadLeft(0) + , m_PadRight(0) + , m_PadTop(0) + , m_PadBottom(0) + , m_StrideX(0) + , m_StrideY(0) + , m_BiasEnabled(false) + {}; + + uint32_t m_PadLeft; + uint32_t m_PadRight; + uint32_t m_PadTop; + uint32_t m_PadBottom; + uint32_t m_StrideX; + uint32_t m_StrideY; + bool m_BiasEnabled; +}; + +struct DepthwiseConvolution2dDescriptor +{ + DepthwiseConvolution2dDescriptor() + : m_PadLeft(0) + , m_PadRight(0) + , m_PadTop(0) + , m_PadBottom(0) + , m_StrideX(0) + , m_StrideY(0) + , m_BiasEnabled(false) + {} + + uint32_t m_PadLeft; + uint32_t m_PadRight; + uint32_t m_PadTop; + uint32_t m_PadBottom; + uint32_t m_StrideX; + uint32_t m_StrideY; + bool m_BiasEnabled; +}; + + +struct NormalizationDescriptor +{ + NormalizationDescriptor() + : m_NormChannelType(NormalizationAlgorithmChannel::Across) + , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness) + , m_NormSize(0) + , m_Alpha(0.f) + , m_Beta(0.f) + , m_K(0.f) + {} + + NormalizationAlgorithmChannel m_NormChannelType; + NormalizationAlgorithmMethod m_NormMethodType; + uint32_t m_NormSize; + float m_Alpha; + float m_Beta; + float m_K; +}; + +struct BatchNormalizationDescriptor +{ + BatchNormalizationDescriptor() + : m_Eps(0.0001f) + {} + + float m_Eps; +}; + +struct FakeQuantizationDescriptor +{ + FakeQuantizationDescriptor() + : m_Min(-6.0f) + , m_Max(6.0f) + {} + + float m_Min; + float m_Max; +}; + +struct ResizeBilinearDescriptor +{ + ResizeBilinearDescriptor() + : m_TargetWidth(0) + , m_TargetHeight(0) + {} + + uint32_t m_TargetWidth; + uint32_t m_TargetHeight; +}; + +struct ReshapeDescriptor +{ + TensorShape m_TargetShape; +}; + +} diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp new file mode 100644 index 0000000000..58b4bcc626 --- /dev/null +++ b/include/armnn/DescriptorsFwd.hpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ +struct ActivationDescriptor; +struct BatchNormalizationDescriptor; +struct Convolution2dDescriptor; +struct DepthwiseConvolution2dDescriptor; +struct FakeQuantizationDescriptor; +struct FullyConnectedDescriptor; +struct PermuteDescriptor; +struct NormalizationDescriptor; +struct Pooling2dDescriptor; +struct ReshapeDescriptor; +struct ResizeBilinearDescriptor; +struct SoftmaxDescriptor; +struct OriginsDescriptor; +struct ViewsDescriptor; + +using MergerDescriptor = OriginsDescriptor; +using SplitterDescriptor = ViewsDescriptor; +} diff --git a/include/armnn/Exceptions.hpp b/include/armnn/Exceptions.hpp new file mode 100644 index 0000000000..0b043997c4 --- /dev/null +++ b/include/armnn/Exceptions.hpp @@ -0,0 +1,75 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <stdexcept> +#include <string> + +namespace armnn +{ + +// base class for all ArmNN exceptions so that users can filter to just those +class Exception : public std::exception +{ +public: + explicit Exception(const std::string& message); + + virtual const char* what() const noexcept override; + +private: + std::string m_Message; +}; + +class ClRuntimeUnavailableException : public Exception +{ +public: + using Exception::Exception; +}; + +class InvalidArgumentException : public Exception +{ +public: + using Exception::Exception; +}; + +class FileNotFoundException : public Exception +{ +public: + using Exception::Exception; +}; + +class ParseException : public Exception +{ +public: + using Exception::Exception; +}; + +class UnimplementedException : public Exception +{ +public: + using Exception::Exception; + UnimplementedException(); +}; + +class LayerValidationException : public Exception +{ + using Exception::Exception; +}; + +class GraphValidationException : public Exception +{ + using Exception::Exception; +}; + +template <typename ExceptionType> +void ConditionalThrow(bool condition, const std::string& message) +{ + if (!condition) + { + throw ExceptionType(message); + } +} + +} diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp new file mode 100644 index 0000000000..8545629c96 --- /dev/null +++ b/include/armnn/INetwork.hpp @@ -0,0 +1,281 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "armnn/NetworkFwd.hpp" +#include "armnn/DescriptorsFwd.hpp" +#include "armnn/TensorFwd.hpp" + +#include "armnn/Types.hpp" + +#include <memory> + +namespace armnn +{ + +/// @brief An input connection slot for a layer. +/// The input slot can be connected to an output slot of the preceding layer in the graph. +/// Only one connection to the input slot is allowed. +class IInputSlot +{ +public: + virtual const IOutputSlot* GetConnection() const = 0; + virtual IOutputSlot* GetConnection() = 0; + +protected: + ~IInputSlot() {} /// Not user deletable +}; + +/// @brief An output connection slot for a layer. +/// The output slot may be connected to 1 or more input slots of subsequent layers in the graph. 
+class IOutputSlot +{ +public: + virtual unsigned int GetNumConnections() const = 0; + virtual const IInputSlot* GetConnection(unsigned int index) const = 0; + virtual IInputSlot* GetConnection(unsigned int index) = 0; + + virtual void SetTensorInfo(const TensorInfo& tensorInfo) = 0; + virtual const TensorInfo& GetTensorInfo() const = 0; + virtual bool IsTensorInfoSet() const = 0; + + virtual int Connect(IInputSlot& destination) = 0; + virtual void Disconnect(IInputSlot& slot) = 0; + +protected: + ~IOutputSlot() {} /// Not user deletable +}; + +/// @brief Interface for a layer that is connectable to other layers via InputSlots and OutputSlots. +class IConnectableLayer +{ +public: + virtual const char* GetName() const = 0; + + virtual unsigned int GetNumInputSlots() const = 0; + virtual unsigned int GetNumOutputSlots() const = 0; + + virtual const IInputSlot& GetInputSlot(unsigned int index) const = 0; + virtual IInputSlot& GetInputSlot(unsigned int index) = 0; + + virtual const IOutputSlot& GetOutputSlot(unsigned int index) const = 0; + virtual IOutputSlot& GetOutputSlot(unsigned int index) = 0; + +protected: + ~IConnectableLayer() {} // Objects are not deletable via the handle +}; + +using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>; + +/// Main network class which provides the interface for building up a neural network. +/// This object is subsequently required by the IRuntime::Load() method. +class INetwork +{ +public: + static INetwork* CreateRaw(); + static INetworkPtr Create(); + static void Destroy(INetwork* network); + + virtual Status PrintGraph() = 0; + + /// Add an input layer to the network. + /// @param id User generated id to uniquely identify a particular input. The same id needs to be specified + /// when passing the inputs to the IRuntime::EnqueueWorkload() function. + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr) = 0; + + /// Add a 2D convolution layer to the network. + /// @param convolution2dDescriptor Description of the 2D convolution layer + /// @param weights Tensor for the weights data. + /// @param biases (Optional) Tensor for the bias data. Must match the output tensor shape. + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const char* name = nullptr) = 0; + + virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor& biases, + const char* name = nullptr) = 0; + + /// Add a 2D depthwise convolution layer to the network. + /// @param convolution2dDescriptor Description of the 2D depthwise convolution layer + /// @param weights Tensor for the weights data. Expected format: [1, outputChannels, height, width] + /// @param biases (Optional) Tensor for the bias data. Must match the output tensor shape. + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. 
+ virtual IConnectableLayer* AddDepthwiseConvolution2dLayer( + const DepthwiseConvolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const char* name = nullptr) = 0; + + virtual IConnectableLayer* AddDepthwiseConvolution2dLayer( + const DepthwiseConvolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor& biases, + const char* name = nullptr) = 0; + + /// Add a fully connected layer to the network. + /// @param fullyConnectedDescriptor Description of the fully connected layer + /// @param weights Tensor for the weights data. + /// @param biases (Optional) Tensor for the bias data. + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor, + const ConstTensor& weights, + const char* name = nullptr) = 0; + + virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor, + const ConstTensor& weights, + const ConstTensor& biases, + const char* name = nullptr) = 0; + + /// Add a permute layer to the network. + /// @param permuteDescriptor PermuteDescriptor to configure the permute + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor, + const char* name = nullptr) = 0; + + /// Add a pooling layer to the network. + /// @param pooling2dDescriptor Pooling2dDescriptor to configure the pooling + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor, + const char* name = nullptr) = 0; + + /// Add an activation layer to the network. + /// @param activationDescriptor ActivationDescriptor to configure the activation + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor, + const char* name = nullptr) = 0; + + /// Add a normalization layer to the network. + /// @param normalizationDescriptor NormalizationDescriptor to configure the normalization + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor, + const char* name = nullptr) = 0; + + /// Add a softmax layer to the network. + /// @param softmaxDescriptor SoftmaxDescriptor to configure the softmax + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor, + const char* name = nullptr) = 0; + + /// Add a splitter layer to the network. + /// @param splitterDescriptor WindowsDescriptor to configure the splitting process. Number of Views must be equal to + /// the number of outputs, and their order must match - e.g. first view corresponds to + /// the first output, second view to the second output, etc.... + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor + , const char* name = nullptr) = 0; + + /// Add a merger layer to the network. 
+ /// @param mergerDescriptor WindowsDescriptor to configure the merging process. Number of Views must be equal to + /// the number of inputs, and their order must match - e.g. first view corresponds to + /// the first input, second view to the second input, etc.... + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddMergerLayer(const OriginsDescriptor& mergerDescriptor, + const char* name = nullptr) = 0; + + /// Add an addition layer to the network. + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddAdditionLayer(const char* name = nullptr) = 0; + + /// Add a multiplication layer to the network. + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr) = 0; + + /// Add a batch normalization layer to the network. + /// @param mean Pre-calculated mean for each channel + /// @param variance Pre-calculated variance for each channel + /// @param beta Per-channel additive factor + /// @param gamma Per-channel multiplicative factor + /// @return Interface for configuring the layer. + /// @param name Optional name for the layer + virtual IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc, + const ConstTensor& mean, + const ConstTensor& variance, + const ConstTensor& beta, + const ConstTensor& gamma, + const char* name = nullptr) = 0; + + /// Add a resize bilinear layer to the network. + /// @param resizeDesc Parameters for the resize operation + /// @param name Optional name for the layer + /// @return Interface for configuring the layer + virtual IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc, + const char* name = nullptr) = 0; + + /// Add an L2 normalization layer to the network. + /// Normalization is performed along dimension 1, but requires a 4d input. + /// @param name Optional name for the layer + /// @return Interface for configuring the layer + virtual IConnectableLayer* AddL2NormalizationLayer(const char* name = nullptr) = 0; + + /// Adds a layer with no inputs and a single output, which always corresponds to + /// the passed in constant tensor. + /// @param input Tensor to be provided as the only output of the layer. The layer will maintain its own copy of the + /// tensor data, meaning the memory referenced by @a input can be freed or reused after this function is + /// called. + /// @param name Optional name for the layer + /// @return Interface for configuring the layer + virtual IConnectableLayer* AddConstantLayer(const ConstTensor& input, + const char* name = nullptr) = 0; + + /// Add a reshape layer to the network. + /// @param reshapeDescriptor Parameters for the reshape operation + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor, + const char* name = nullptr) = 0; + + /// Add a floor layer to the network. + /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddFloorLayer(const char* name = nullptr) = 0; + + /// Add an output layer to the network. + /// @param id User generated id to uniquely identify a particular output. The same id needs to be specified + /// when passing the outputs to the IRuntime::EnqueueWorkload() function. 
+ /// @param name Optional name for the layer + /// @return Interface for configuring the layer. + virtual IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr) = 0; + +protected: + ~INetwork() {} +}; + +using IOptimizedNetworkPtr = std::unique_ptr<IOptimizedNetwork, void(*)(IOptimizedNetwork* network)>; + +class IOptimizedNetwork +{ +public: + static void Destroy(IOptimizedNetwork* network); + + virtual Status PrintGraph() = 0; + +protected: + ~IOptimizedNetwork() {} +}; + + +/// Create an optimized version of the network +/// @param network INetwork description of the network to be optimized. +/// @param deviceSpec The choice of the default computation backend. +/// @return An IOptimizedNetworkPtr interface to the optimized network, throws an exception derived from +/// armnn::Exception if process fails. +IOptimizedNetworkPtr Optimize(const INetwork& network, const DeviceSpec& deviceSpec); + +} //namespace armnn diff --git a/include/armnn/IRuntime.hpp b/include/armnn/IRuntime.hpp new file mode 100644 index 0000000000..a1a3f0fda9 --- /dev/null +++ b/include/armnn/IRuntime.hpp @@ -0,0 +1,116 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <memory> + +#include "Types.hpp" +#include "Tensor.hpp" +#include "INetwork.hpp" +#include "TypesUtils.hpp" + +namespace armnn +{ + +using NetworkId = int; + +class IClTunedParameters; + +class IRuntime; +using IRuntimePtr = std::unique_ptr<IRuntime, void(*)(IRuntime* runtime)>; + +class IRuntime +{ +public: + struct CreationOptions + { + Compute m_DefaultComputeDevice; + bool m_UseCpuRefAsFallback; + /// If set, uses the CL tuned parameters from the given object when executing CL workloads. + /// It will also be updated with new tuned parameters if it is configured to do so. + IClTunedParameters* m_ClTunedParameters; + + CreationOptions(Compute defaultComputeDevice) + : m_DefaultComputeDevice(defaultComputeDevice) + , m_UseCpuRefAsFallback(true) + , m_ClTunedParameters(nullptr) + { + } + }; + + static IRuntime* CreateRaw(const CreationOptions& options); + static IRuntimePtr Create(const CreationOptions& options); + static void Destroy(IRuntime* runtime); + + /// Load a complete network into the IRuntime. + /// @param [out] networkIdOut Unique identifier for the network is returned in this reference. + /// @param [in] network Complete network to load into the IRuntime. + /// The runtime takes ownership of the network once passed in. + /// @return armnn::Status + virtual Status LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr network) = 0; + + virtual TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const = 0; + virtual TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const = 0; + + // Evaluate network using input in inputTensors, outputs filled into outputTensors + virtual Status EnqueueWorkload(NetworkId networkId, + const InputTensors& inputTensors, + const OutputTensors& outputTensors) = 0; + + /// Unload a network from the IRuntime. + /// At the moment this only removes the network from the m_Impl->m_Network. + /// This might need more work in the future to be AndroidNN compliant. + /// @param [in] networkId Unique identifier for the network to be unloaded. Generated in LoadNetwork(). 
+ /// @return armnn::Status + virtual Status UnloadNetwork(NetworkId networkId) = 0; + + virtual const DeviceSpec& GetDeviceSpec() const = 0; + +protected: + ~IRuntime() {} +}; + +using IClTunedParametersPtr = std::unique_ptr<IClTunedParameters, void(*)(IClTunedParameters* params)>; + +/// Manages a set of Open CL parameters which have been tuned for maximum performance. +/// Pass an instance of this object to the IRuntime::Create() method (via IRuntime::CreationOptions) to use it +/// for all CL workload execution. +/// +/// Can be created in two modes: +/// - In UseTunedParameters mode the parameters stored in this object are used to execute CL workloads. +/// - In UpdateTunedParameters mode, additionally, whenever a CL workload is executed for the first time the +/// optimum parameters will be found and stored in this object. WARNING - This tuning can be slow. +/// +/// The parameters can be loaded from and saved to a file so that you first run a slow initial read-write +/// execution, save the parameters for later and then run fast read-only executions using the optimised parameters. +class IClTunedParameters +{ +public: + enum class Mode + { + UseTunedParameters, + UpdateTunedParameters + }; + + /// Creates an IClTunedParameters with the given mode. + /// @{ + static IClTunedParameters* CreateRaw(Mode mode); + static IClTunedParametersPtr Create(Mode mode); + /// @} + static void Destroy(IClTunedParameters* params); + + /// Loads an existing set of tuned parameters from the given file. + /// If there is an error loading the file, an armnn::Exception is thrown. + virtual void Load(const char* filename) = 0; + + /// Saves the current set of tuned parameters to the given file. + /// If there is an error saving to the file, an armnn::Exception is thrown. + virtual void Save(const char* filename) const = 0; + +protected: + virtual ~IClTunedParameters() {}; +}; + +} diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp new file mode 100644 index 0000000000..d9de76f89c --- /dev/null +++ b/include/armnn/LayerSupport.hpp @@ -0,0 +1,140 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include <armnn/DescriptorsFwd.hpp> +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +bool IsActivationSupported(Compute compute, + const TensorInfo& input, + const ActivationDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsAdditionSupported(Compute compute, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsBatchNormalizationSupported(Compute compute, + const TensorInfo& input, + const BatchNormalizationDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsConstantSupported(Compute compute, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsConvolution2dSupported(Compute compute, + const TensorInfo& input, + const Convolution2dDescriptor& descriptor, + const TensorInfo& weights, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsDepthwiseConvolutionSupported(Compute compute, + const TensorInfo& input, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsInputSupported(Compute compute, + const TensorInfo& input, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsFullyConnectedSupported(Compute compute, + const TensorInfo& input,const + FullyConnectedDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsL2NormalizationSupported(Compute compute, + const TensorInfo& input, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsMergerSupported(Compute compute, + const std::vector<const TensorInfo*> inputs, + const OriginsDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsMultiplicationSupported(Compute compute, + const TensorInfo& input0, + const TensorInfo& input1, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsNormalizationSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsOutputSupported(Compute compute, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsPermuteSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsPooling2dSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsResizeBilinearSupported(Compute compute, + const TensorInfo& input, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsSoftmaxSupported(Compute compute, + const TensorInfo& input, + const SoftmaxDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 
1024); + +bool IsSplitterSupported(Compute compute, + const TensorInfo& input, + const ViewsDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsFakeQuantizationSupported(Compute compute, + const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsReshapeSupported(Compute compute, + const TensorInfo& input, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +bool IsFloorSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + +} diff --git a/include/armnn/NetworkFwd.hpp b/include/armnn/NetworkFwd.hpp new file mode 100644 index 0000000000..75667fdfd0 --- /dev/null +++ b/include/armnn/NetworkFwd.hpp @@ -0,0 +1,16 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ +class INetwork; +class IOptimizedNetwork; +class Graph; +class IInputSlot; +class IOutputSlot; +class IConnectableLayer; +class IDataLayer; +}
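The capability queries declared in LayerSupport.hpp above can be used to probe a backend before building a network for it. A minimal usage sketch, not part of this patch; the helper function name and the tensor dimensions are made-up placeholders:

#include <armnn/ArmNN.hpp>
#include <cstdio>

// Hypothetical caller: asks whether the GPU backend can run a ReLu activation
// on a given float tensor, and prints the backend's reason if it cannot.
bool CheckReluOnGpu()
{
    const armnn::TensorInfo input(armnn::TensorShape({ 1, 16, 8, 8 }), armnn::DataType::Float32);

    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    char reason[1024] = { 0 };
    const bool supported = armnn::IsActivationSupported(armnn::Compute::GpuAcc, input, desc,
                                                        reason, sizeof(reason));
    if (!supported)
    {
        std::printf("Activation not supported on GpuAcc: %s\n", reason);
    }
    return supported;
}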
\ No newline at end of file diff --git a/include/armnn/Tensor.hpp b/include/armnn/Tensor.hpp new file mode 100644 index 0000000000..910278f33f --- /dev/null +++ b/include/armnn/Tensor.hpp @@ -0,0 +1,179 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once +#include "TensorFwd.hpp" + +#include "Types.hpp" +#include "Exceptions.hpp" + +#include <array> +#include <initializer_list> +#include <vector> + +namespace armnn +{ + +class TensorShape +{ +public: + /// Empty (invalid) constructor + TensorShape(); + + TensorShape(unsigned int numDimensions, const unsigned int* dimensionSizes); + + TensorShape(std::initializer_list<unsigned int> dimensionSizeList); + + TensorShape(const TensorShape& other); + + TensorShape& operator=(const TensorShape& other); + + unsigned int operator[](unsigned int i) const + { + return m_Dimensions.at(i); + } + + unsigned int& operator[](unsigned int i) + { + return m_Dimensions.at(i); + } + + bool operator==(const TensorShape& other) const; + bool operator!=(const TensorShape& other) const; + + unsigned int GetNumDimensions() const { return m_NumDimensions; } + unsigned int GetNumElements() const; + +private: + std::array<unsigned int, MaxNumOfTensorDimensions> m_Dimensions; + unsigned int m_NumDimensions; +}; + +class TensorInfo +{ +public: + /// Empty (invalid) constructor + TensorInfo(); + + TensorInfo(const TensorShape& shape, DataType dataType, + float quantizationScale = 0.0f, int32_t quantizationOffset = 0); + TensorInfo(unsigned int numDimensions, const unsigned int* dimensionSizes, DataType dataType, + float quantizationScale = 0.0f, int32_t quantizationOffset = 0); + + TensorInfo(const TensorInfo& other); + + TensorInfo& operator=(const TensorInfo& other); + + bool operator==(const TensorInfo& other) const; + bool operator!=(const TensorInfo& other) const; + + const TensorShape& GetShape() const { return m_Shape; } + TensorShape& GetShape() { return m_Shape; } + void SetShape(const TensorShape& newShape) { m_Shape = newShape; } + + unsigned int GetNumDimensions() const { return m_Shape.GetNumDimensions(); } + unsigned int GetNumElements() const { return m_Shape.GetNumElements(); } + + DataType GetDataType() const { return m_DataType; } + void SetDataType(DataType type) { m_DataType = type; } + + float GetQuantizationScale() const { return m_Quantization.m_Scale; } + int32_t GetQuantizationOffset() const { return m_Quantization.m_Offset; } + void SetQuantizationScale(float scale) { m_Quantization.m_Scale = scale; } + void SetQuantizationOffset(int32_t offset) { m_Quantization.m_Offset = offset; } + + unsigned int GetNumBytes() const; + +private: + TensorShape m_Shape; + DataType m_DataType; + /// Scale and offset values used for quantization + struct Quantization + { + Quantization() : m_Scale(0.f), m_Offset(0) {} + bool operator==(const Quantization& o) const {return ((m_Scale == o.m_Scale) && (m_Offset == o.m_Offset));} + float m_Scale; + int32_t m_Offset; + } m_Quantization; +}; + +template<typename MemoryType> +class BaseTensor +{ +public: + /// Empty (invalid) constructor + BaseTensor(); + + /// Constructor from a raw memory pointer. + /// @param memoryArea Region of CPU-addressable memory where tensor data will be stored. Must be valid while + /// workloads are on the fly. Tensor instances do not claim ownership of referenced memory regions, that is, + /// no attempt will be made by ArmNN to free these memory regions automatically. 
+ BaseTensor(const TensorInfo& info, MemoryType memoryArea); + + /// Tensors are copyable. + BaseTensor(const BaseTensor& other); + + /// Tensors are copyable. + BaseTensor& operator=(const BaseTensor&); + + const TensorInfo& GetInfo() const { return m_Info; } + TensorInfo& GetInfo() { return m_Info; } + const TensorShape& GetShape() const { return m_Info.GetShape(); } + TensorShape& GetShape() { return m_Info.GetShape(); } + + DataType GetDataType() const { return m_Info.GetDataType(); } + unsigned int GetNumDimensions() const { return m_Info.GetNumDimensions(); } + unsigned int GetNumBytes() const { return m_Info.GetNumBytes(); } + unsigned int GetNumElements() const { return m_Info.GetNumElements(); } + + MemoryType GetMemoryArea() const { return m_MemoryArea; } + +protected: + // protected destructor to stop users from making these + // (could still new one on the heap and then leak it...) + ~BaseTensor() {} + + MemoryType m_MemoryArea; + +private: + TensorInfo m_Info; +}; + +/// A tensor defined by a TensorInfo (shape and data type) and a mutable backing store. +class Tensor : public BaseTensor<void*> +{ +public: + using BaseTensor<void*>::BaseTensor; // Bring in the constructors and assignment operator +}; + +/// A tensor defined by a TensorInfo (shape and data type) and an immutable backing store. +class ConstTensor : public BaseTensor<const void*> +{ +public: + using BaseTensor<const void*>::BaseTensor; // Bring in the constructors and assignment operator + ConstTensor() : BaseTensor<const void*>() {} // This needs to be redefined explicitly?? + + // Can be implicitly constructed from non-const Tensor + ConstTensor(const Tensor& other) : BaseTensor<const void*>(other.GetInfo(), other.GetMemoryArea()) {} + + /// Constructor from a backing container. + /// @param container An stl-like container type which implements data() and size() methods. + /// Presence of data() and size() is a strong indicator of the continuous memory layout of the container, + /// which is a requirement for Tensor data. Tensor instances do not claim ownership of referenced memory regions, + /// that is, no attempt will be made by ArmNN to free these memory regions automatically. + template < template<typename, typename...> class ContainerType, typename T, typename...ContainerArgs > + ConstTensor(const TensorInfo& info, const ContainerType<T, ContainerArgs...>& container) + : BaseTensor<const void*>(info, container.data()) + { + if (container.size() * sizeof(T) != info.GetNumBytes()) + { + throw InvalidArgumentException("Container size is not correct"); + } + } +}; + +using InputTensors = std::vector<std::pair<LayerBindingId, class ConstTensor>>; +using OutputTensors = std::vector<std::pair<LayerBindingId, class Tensor>>; + +} // namespace armnn diff --git a/include/armnn/TensorFwd.hpp b/include/armnn/TensorFwd.hpp new file mode 100644 index 0000000000..5ea035c877 --- /dev/null +++ b/include/armnn/TensorFwd.hpp @@ -0,0 +1,15 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ + +class TensorShape; +class TensorInfo; +class Tensor; +class ConstTensor; + +} diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp new file mode 100644 index 0000000000..e1aa393ecc --- /dev/null +++ b/include/armnn/Types.hpp @@ -0,0 +1,155 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include <array> + +namespace armnn +{ + +constexpr unsigned int MaxNumOfTensorDimensions = 4U; + +/// @enum Status enumeration +/// @var Status::Successful +/// @var Status::Failure +enum class Status +{ + Success = 0, + Failure = 1 +}; + +enum class DataType +{ + Float32 = 0, + QuantisedAsymm8 = 1, + Signed32 = 2 +}; + +enum class ActivationFunction +{ + Sigmoid = 0, + TanH = 1, + Linear = 2, + ReLu = 3, + BoundedReLu = 4, //< min(a, max(b, input)) + SoftReLu = 5, + LeakyReLu = 6, + Abs = 7, + Sqrt = 8, + Square = 9 +}; + +enum class PoolingAlgorithm +{ + Max = 0, + Average = 1, + L2 = 2 +}; + +/// +/// The padding method modifies the output of pooling layers. +/// In both supported methods, the values are ignored (they are +/// not even zeros which would make a difference for max pooling +/// a tensor with negative values). The difference between +/// IgnoreValue and Exclude is that the former count the padding +/// fields in the divisor of Average and L2 pooling, while +/// Exclude does not. +/// +enum class PaddingMethod +{ + IgnoreValue = 0, // The padding fields count, but ignored + Exclude = 1 // The padding fields don't count and ignored +}; + +enum class NormalizationAlgorithmChannel +{ + Across = 0, + Within = 1 +}; + +enum class NormalizationAlgorithmMethod +{ + LocalBrightness = 0, /* Krichevsky 2012: Local Brightness Normalization */ + LocalContrast = 1 /* Jarret 2009: Local Contrast Normalization */ +}; + +enum class OutputShapeRounding +{ + Floor = 0, + Ceiling = 1 +}; + +enum class Compute +{ + CpuRef = 0, // CPU Execution: Reference C++ kernels + CpuAcc = 1, // CPU Execution: NEON: ArmCompute + GpuAcc = 2, // GPU Execution: OpenCL: ArmCompute + Undefined = 5 +}; + +struct DeviceSpec +{ + Compute DefaultComputeDevice; +}; + +/// Type of identifiers for bindable layers (inputs, outputs). +using LayerBindingId = int; + +class PermutationVector +{ +public: + using ValueType = unsigned int; + using SizeType = unsigned int; + using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>; + using ConstIterator = typename ArrayType::const_iterator; + + /// @param dimMappings Indicates how to translate tensor elements from a given source into the target destination, + /// when source and target potentially have different memory layouts. + /// + /// E.g. For a 4-d tensor laid out in memory with format (Batch Element, Height, Width, Channels), + /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding + /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped + /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and + /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array: + /// [ 0, 2, 3, 1 ]. + /// + /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element, + /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have + /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, lead to the following @ref m_DimMappings contents: + /// [ 0, 3, 1, 2 ]. 
+ /// + PermutationVector(const ValueType *dimMappings, SizeType numDimMappings); + + PermutationVector(std::initializer_list<ValueType> dimMappings); + + ValueType operator[](SizeType i) const { return m_DimMappings.at(i); } + + SizeType GetSize() const { return m_NumDimMappings; } + + ConstIterator begin() const { return m_DimMappings.begin(); } + ConstIterator end() const { return m_DimMappings.end(); } + + bool IsEqual(const PermutationVector& other) const + { + return std::equal(begin(), end(), other.begin(), other.end()); + } + + bool IsInverse(const PermutationVector& other) const + { + bool isInverse = (GetSize() == other.GetSize()); + for (SizeType i = 0; isInverse && (i < GetSize()); ++i) + { + isInverse = (m_DimMappings[other.m_DimMappings[i]] == i); + } + return isInverse; + } + +private: + ArrayType m_DimMappings; + /// Number of valid entries in @ref m_DimMappings + SizeType m_NumDimMappings; +}; + +} diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp new file mode 100644 index 0000000000..a851b66b28 --- /dev/null +++ b/include/armnn/TypesUtils.hpp @@ -0,0 +1,182 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Types.hpp" +#include <cmath> +#include <ostream> +#include <boost/assert.hpp> +#include <boost/numeric/conversion/cast.hpp> + +namespace armnn +{ + +constexpr char const* GetStatusAsCString(Status compute) +{ + switch (compute) + { + case armnn::Status::Success: return "Status::Success"; + case armnn::Status::Failure: return "Status::Failure"; + default: return "Unknown"; + } +} + +constexpr char const* GetComputeDeviceAsCString(Compute compute) +{ + switch (compute) + { + case armnn::Compute::CpuRef: return "CpuRef"; + case armnn::Compute::CpuAcc: return "CpuAcc"; + case armnn::Compute::GpuAcc: return "GpuAcc"; + default: return "Unknown"; + } +} + +constexpr unsigned int GetDataTypeSize(DataType dataType) +{ + switch (dataType) + { + case DataType::Signed32: + case DataType::Float32: return 4U; + case DataType::QuantisedAsymm8: return 1U; + default: return 0U; + } +} + +template <int N> +constexpr bool StrEqual(const char* strA, const char (&strB)[N]) +{ + bool isEqual = true; + for (int i = 0; isEqual && (i < N); ++i) + { + isEqual = (strA[i] == strB[i]); + } + return isEqual; +} + +constexpr Compute ParseComputeDevice(const char* str) +{ + if (StrEqual(str, "CpuAcc")) + { + return armnn::Compute::CpuAcc; + } + else if (StrEqual(str, "CpuRef")) + { + return armnn::Compute::CpuRef; + } + else if (StrEqual(str, "GpuAcc")) + { + return armnn::Compute::GpuAcc; + } + else + { + return armnn::Compute::Undefined; + } +} + +constexpr const char* GetDataTypeName(DataType dataType) +{ + switch (dataType) + { + case DataType::Float32: return "Float32"; + case DataType::QuantisedAsymm8: return "Unsigned8"; + case DataType::Signed32: return "Signed32"; + default: return "Unknown"; + } +} + +template <typename T> +constexpr DataType GetDataType(); + +template <> +constexpr DataType GetDataType<float>() +{ + return DataType::Float32; +} + +template <> +constexpr DataType GetDataType<uint8_t>() +{ + return DataType::QuantisedAsymm8; +} + +template <> +constexpr DataType GetDataType<int32_t>() +{ + return DataType::Signed32; +} + +template<typename T> +constexpr bool IsQuantizedType() +{ + return std::is_integral<T>::value; +} + + +template<DataType DT> +struct ResolveTypeImpl; + +template<> +struct ResolveTypeImpl<DataType::QuantisedAsymm8> +{ + using 
Type = uint8_t; +}; + +template<> +struct ResolveTypeImpl<DataType::Float32> +{ + using Type = float; +}; + +template<DataType DT> +using ResolveType = typename ResolveTypeImpl<DT>::Type; + + +inline std::ostream& operator<<(std::ostream& os, Status stat) +{ + os << GetStatusAsCString(stat); + return os; +} + +inline std::ostream& operator<<(std::ostream& os, Compute compute) +{ + os << GetComputeDeviceAsCString(compute); + return os; +} + +/// Quantize a floating point data type into an 8-bit data type +/// @param value The value to quantize +/// @param scale The scale (must be non-zero) +/// @param offset The offset +/// @return The quantized value calculated as round(value/scale)+offset +/// +template<typename QuantizedType> +inline QuantizedType Quantize(float value, float scale, int32_t offset) +{ + static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type."); + constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max(); + constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest(); + BOOST_ASSERT(scale != 0.f); + int quantized = boost::numeric_cast<int>(round(value / scale)) + offset; + QuantizedType quantizedBits = quantized < min ? min : quantized > max ? max : static_cast<QuantizedType>(quantized); + return quantizedBits; +} + +/// Dequantize an 8-bit data type into a floating point data type +/// @param value The value to dequantize +/// @param scale The scale (must be non-zero) +/// @param offset The offset +/// @return The dequantized value calculated as (value-offset)*scale +/// +template <typename QuantizedType> +inline float Dequantize(QuantizedType value, float scale, int32_t offset) +{ + static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type."); + BOOST_ASSERT(scale != 0.f); + float dequantized = boost::numeric_cast<float>(value - offset) * scale; + return dequantized; +} + +} //namespace armnn
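A minimal sketch of the Quantize/Dequantize helpers defined above, assuming a caller-written check function with made-up scale and offset values; it only exercises the formulas stated in the doc comments (round(value/scale)+offset and (value-offset)*scale):

#include <armnn/TypesUtils.hpp>
#include <cassert>

void QuantizationRoundTripExample()
{
    const float   scale  = 0.5f;  // one quantised step represents 0.5 in float space
    const int32_t offset = 10;    // zero point

    // round(3.2f / 0.5f) + 10 == 6 + 10 == 16
    const uint8_t q = armnn::Quantize<uint8_t>(3.2f, scale, offset);
    assert(q == 16);

    // (16 - 10) * 0.5f == 3.0f; the remaining 0.2f was lost to rounding
    const float d = armnn::Dequantize(q, scale, offset);
    assert(d == 3.0f);
}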
\ No newline at end of file diff --git a/include/armnn/Utils.hpp b/include/armnn/Utils.hpp new file mode 100644 index 0000000000..1a0c34baad --- /dev/null +++ b/include/armnn/Utils.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ + +enum class LogSeverity +{ + Trace, + Debug, + Info, + Warning, + Error, + Fatal +}; + +/// Configures the logging behaviour of the ARMNN library. +/// printToStandardOutput: Set to true if log messages should be printed to the standard output. +/// printToDebugOutput: Set to true if log messages be printed to a platform-specific debug output +/// (where supported). +/// severity: All log messages that are at this severity level or higher will be printed, others will be ignored. +void ConfigureLogging(bool printToStandardOutput, bool printToDebugOutput, LogSeverity severity); + +} diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp new file mode 100644 index 0000000000..6ce8256faa --- /dev/null +++ b/include/armnn/Version.hpp @@ -0,0 +1,12 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +// YYYYMMPP +// where: +// YYYY = 4-digit year number +// MM = 2-digit month number +// PP = 2-digit patch number +#define ARMNN_VERSION "20180200" diff --git a/include/armnnCaffeParser/ICaffeParser.hpp b/include/armnnCaffeParser/ICaffeParser.hpp new file mode 100644 index 0000000000..55fc85052b --- /dev/null +++ b/include/armnnCaffeParser/ICaffeParser.hpp @@ -0,0 +1,59 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "armnn/Types.hpp" +#include "armnn/NetworkFwd.hpp" +#include "armnn/Tensor.hpp" +#include "armnn/INetwork.hpp" + +#include <memory> +#include <map> +#include <vector> + +namespace armnnCaffeParser +{ + +using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>; + +class ICaffeParser; +using ICaffeParserPtr = std::unique_ptr<ICaffeParser, void(*)(ICaffeParser* parser)>; + +class ICaffeParser +{ +public: + static ICaffeParser* CreateRaw(); + static ICaffeParserPtr Create(); + static void Destroy(ICaffeParser* parser); + + /// Create the network from a protobuf text file on disk + virtual armnn::INetworkPtr CreateNetworkFromTextFile( + const char* graphFile, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) = 0; + + /// Create the network from a protobuf binary file on disk + virtual armnn::INetworkPtr CreateNetworkFromBinaryFile( + const char* graphFile, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) = 0; + + /// Create the network directly from protobuf text in a string. 
Useful for debugging/testing + virtual armnn::INetworkPtr CreateNetworkFromString( + const char* protoText, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) = 0; + + /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name + virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const = 0; + + /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name + virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const = 0; + +protected: + virtual ~ICaffeParser() {}; +}; + +}
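A minimal sketch of driving the parser interface above, not part of this patch; the prototxt file name, layer names and input shape are placeholders:

#include <armnnCaffeParser/ICaffeParser.hpp>
#include <map>
#include <string>
#include <vector>

// Hypothetical helper: parses a Caffe text model and returns the INetwork,
// also looking up the binding info needed later for EnqueueWorkload().
armnn::INetworkPtr ParseCaffeModelExample()
{
    auto parser = armnnCaffeParser::ICaffeParser::Create();

    std::map<std::string, armnn::TensorShape> inputShapes;
    inputShapes["data"] = armnn::TensorShape({ 1, 3, 224, 224 });

    const std::vector<std::string> requestedOutputs = { "prob" };

    armnn::INetworkPtr network =
        parser->CreateNetworkFromTextFile("model.prototxt", inputShapes, requestedOutputs);

    // Layer id + tensor info pairs for wiring up input/output tensors at run time.
    const armnnCaffeParser::BindingPointInfo inputBinding  = parser->GetNetworkInputBindingInfo("data");
    const armnnCaffeParser::BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo("prob");
    (void)inputBinding;
    (void)outputBinding;

    return network;
}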
\ No newline at end of file diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp new file mode 100644 index 0000000000..0b11b44260 --- /dev/null +++ b/src/armnn/Descriptors.cpp @@ -0,0 +1,279 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "armnn/Descriptors.hpp" + +#include <algorithm> +#include <array> +#include <vector> + +#include <boost/format.hpp> +#include <boost/log/trivial.hpp> +#include <boost/numeric/conversion/cast.hpp> + +namespace armnn +{ + +PermutationVector::PermutationVector(const ValueType *dimMappings, const SizeType numDimMappings) +{ + // Validation + + if (numDimMappings > MaxNumOfTensorDimensions) + { + boost::format fmt("The number of mappings (%1%) cannot be greater " + "than the maximum number of dimensions supported (%2%)"); + throw InvalidArgumentException(boost::str(fmt % numDimMappings % MaxNumOfTensorDimensions)); + } + + if ((dimMappings == nullptr) && (numDimMappings != 0)) + { + throw InvalidArgumentException("Dimension mappings must not be NULL if the number of mappings is positive"); + } + + for (SizeType i = 0; i < numDimMappings; ++i) + { + const ValueType dstIndex = dimMappings[i]; + if (dstIndex >= numDimMappings) + { + boost::format fmt("Dimension mapping at index %1% is invalid: %2% is outside of the valid range [0,%3%]"); + throw InvalidArgumentException(boost::str(fmt % i % dstIndex % (numDimMappings - 1))); + } + } + + // Validation: Detect duplicates + { + std::array<bool, MaxNumOfTensorDimensions> observedDims; + observedDims.fill(false); + + for (SizeType i = 0; i < numDimMappings; ++i) + { + const ValueType dstIndex = dimMappings[i]; + if (observedDims[dstIndex]) + { + throw InvalidArgumentException("Invalid dimension mappings: Two or more source dimensions are mapped " + "to the same output dimension"); + } + observedDims[dstIndex] = true; + } + } + + // Initialize + for (SizeType i = 0; i < numDimMappings; ++i) + { + m_DimMappings[i] = dimMappings[i]; + } + m_NumDimMappings = numDimMappings; +} + +PermutationVector::PermutationVector(std::initializer_list<ValueType> dimMappings) + : PermutationVector(dimMappings.begin(), boost::numeric_cast<SizeType>(dimMappings.size())) +{ +} + +OriginsDescriptor::OriginsDescriptor() +: m_NumViews(0) +, m_NumDimensions(0) +, m_ViewOrigins(nullptr) +{} + +OriginsDescriptor::OriginsDescriptor(uint32_t numViews, uint32_t numDimensions /*= 4*/) +: m_NumViews(numViews) +, m_NumDimensions(numDimensions) +, m_ViewOrigins(numViews && numDimensions > 0 ? new uint32_t *[numViews]() : nullptr) +{ + for (uint32_t i = 0; m_NumDimensions > 0 && i < m_NumViews; ++i) + { + m_ViewOrigins[i] = new uint32_t[m_NumDimensions](); + } +} + +OriginsDescriptor::OriginsDescriptor(const OriginsDescriptor& other) +: m_NumViews(other.m_NumViews) +, m_NumDimensions(other.m_NumDimensions) +, m_ViewOrigins(other.m_NumViews && other.m_NumDimensions > 0 ? 
new uint32_t *[other.m_NumViews]() : nullptr) +{ + for (uint32_t i = 0; m_NumDimensions > 0 && i < m_NumViews; ++i) + { + m_ViewOrigins[i] = new uint32_t[m_NumDimensions](); + memcpy(m_ViewOrigins[i], other.m_ViewOrigins[i], m_NumDimensions * sizeof(uint32_t)); + } +} + +OriginsDescriptor::OriginsDescriptor(OriginsDescriptor&& other) +: OriginsDescriptor() +{ + swap(*this, other); +} + +OriginsDescriptor::~OriginsDescriptor() +{ + for (uint32_t i = 0; m_NumDimensions > 0 && i < m_NumViews; ++i) + { + delete[] m_ViewOrigins[i]; + } + delete[] m_ViewOrigins; +} + +OriginsDescriptor& OriginsDescriptor::operator=(OriginsDescriptor rhs) +{ + swap(*this, rhs); + return *this; +} + +Status OriginsDescriptor::SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value) +{ + if (view >= m_NumViews) + { + BOOST_LOG_TRIVIAL(error) << "OriginsDescriptor::SetViewOriginCoord: view argument:" << view << + " is out of range"; + return Status::Failure; + } + if (coord >= m_NumDimensions) + { + BOOST_LOG_TRIVIAL(error) << "OriginsDescriptor::SetViewOriginCoord: coord argument:" << coord << + " is out of range"; + return Status::Failure; + } + + m_ViewOrigins[view][coord] = value; + return Status::Success; +} + + +uint32_t OriginsDescriptor::GetNumViews() const +{ + return m_NumViews; +} + +uint32_t OriginsDescriptor::GetNumDimensions() const +{ + return m_NumDimensions; +} + +const uint32_t* OriginsDescriptor::GetViewOrigin(uint32_t idx) const +{ + return m_ViewOrigins ? m_ViewOrigins[idx] : nullptr; +} + + +// Reorder the viewOrigins in accordance with the indices presented in newOrdering array +void OriginsDescriptor::ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering) +{ + BOOST_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of " + "elements in the new ordering array"); + std::vector<uint32_t*> viewOrigins(&m_ViewOrigins[0], &m_ViewOrigins[m_NumViews]); + + for (unsigned int i = 0; i < numNewOrdering; ++i) + { + m_ViewOrigins[i] = viewOrigins[newOrdering[i]]; + } +} + +ViewsDescriptor::ViewsDescriptor() +: m_Origins() +, m_ViewSizes(nullptr) +{} + +ViewsDescriptor::ViewsDescriptor(uint32_t numViews, uint32_t numDimensions /*= 4*/) + : m_Origins(numViews, numDimensions) + , m_ViewSizes(numViews && numDimensions > 0 ? new uint32_t *[numViews]() : nullptr) +{ + for (uint32_t i = 0; GetNumDimensions() > 0 && i < GetNumViews(); ++i) + { + m_ViewSizes[i] = new uint32_t[GetNumDimensions()](); + } +} + +ViewsDescriptor::ViewsDescriptor(const ViewsDescriptor& other) + : m_Origins(other.m_Origins) + , m_ViewSizes(other.GetNumViews() && other.GetNumDimensions() > 0 ? 
new uint32_t *[other.GetNumViews()]() : nullptr) +{ + for (uint32_t i = 0; GetNumDimensions() > 0 && i < GetNumViews(); ++i) + { + m_ViewSizes[i] = new uint32_t[GetNumDimensions()](); + memcpy(m_ViewSizes[i], other.m_ViewSizes[i], GetNumDimensions() * sizeof(uint32_t)); + } +} + +ViewsDescriptor::ViewsDescriptor(ViewsDescriptor&& other) + : ViewsDescriptor() +{ + swap(*this, other); +} + +ViewsDescriptor::~ViewsDescriptor() +{ + for (uint32_t i = 0; GetNumDimensions() > 0 && i < GetNumViews(); ++i) + { + delete[] m_ViewSizes[i]; + } + delete[] m_ViewSizes; +} + +ViewsDescriptor& ViewsDescriptor::operator=(ViewsDescriptor rhs) +{ + swap(*this, rhs); + return *this; +} + +uint32_t ViewsDescriptor::GetNumViews() const +{ + return m_Origins.GetNumViews(); +} + +uint32_t ViewsDescriptor::GetNumDimensions() const +{ + return m_Origins.GetNumDimensions(); +} + +const uint32_t* ViewsDescriptor::GetViewOrigin(uint32_t idx) const +{ + return m_Origins.GetViewOrigin(idx); +} + +Status ViewsDescriptor::SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value) +{ + return m_Origins.SetViewOriginCoord(view, coord, value); +} + +Status ViewsDescriptor::SetViewSize(uint32_t view, uint32_t coord, uint32_t value) +{ + if (view >= GetNumViews()) + { + BOOST_LOG_TRIVIAL(error) << "ViewsDescriptor::SetViewSize: view argument:" << view << + " is out of range"; + return Status::Failure; + } + if (coord >= GetNumDimensions()) + { + BOOST_LOG_TRIVIAL(error) << "ViewsDescriptor::SetViewSize: coord argument:" << coord << + " is out of range"; + return Status::Failure; + } + + m_ViewSizes[view][coord] = value; + return Status::Success; +} + +const uint32_t* ViewsDescriptor::GetViewSizes(uint32_t idx) const +{ + return m_ViewSizes ? m_ViewSizes[idx] : nullptr; +} + +void swap(OriginsDescriptor& first, OriginsDescriptor& second) +{ + using std::swap; + swap(first.m_NumViews, second.m_NumViews); + swap(first.m_NumDimensions, second.m_NumDimensions); + swap(first.m_ViewOrigins, second.m_ViewOrigins); +} + +void swap(ViewsDescriptor& first, ViewsDescriptor& second) +{ + using std::swap; + swap(first.m_Origins, second.m_Origins); + swap(first.m_ViewSizes, second.m_ViewSizes); +} + +} diff --git a/src/armnn/Exceptions.cpp b/src/armnn/Exceptions.cpp new file mode 100644 index 0000000000..2cf95fa4d1 --- /dev/null +++ b/src/armnn/Exceptions.cpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "armnn/Exceptions.hpp" + +#include <string> + +namespace armnn +{ + +Exception::Exception(const std::string& message) +: m_Message(message) +{ +} + +const char* Exception::what() const noexcept +{ + return m_Message.c_str(); +} + +UnimplementedException::UnimplementedException() +: Exception("Function not yet implemented") +{ +} + +} diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp new file mode 100644 index 0000000000..97f702e50f --- /dev/null +++ b/src/armnn/Graph.cpp @@ -0,0 +1,169 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "Graph.hpp" +#include "Layers.hpp" + +#include <armnn/Utils.hpp> +#include <armnn/TypesUtils.hpp> + +#include <boost/polymorphic_cast.hpp> +#include <boost/log/trivial.hpp> +#include <boost/assert.hpp> +#include <boost/format.hpp> + +#include <unordered_map> + +namespace armnn +{ + +Graph::Graph(const Graph& other) +: m_LayersInOrder(other.m_LayersInOrder) +{ + std::unordered_map<const Layer*, Layer*> otherToClonedMap; + + for (auto&& otherLayer : other.m_Layers) + { + Layer* const layer = otherLayer->Clone(*this); + otherToClonedMap.emplace(otherLayer, layer); + } + + // Copy slot connections + for (auto&& otherLayer : other.m_Layers) + { + Layer* const thisLayer = otherToClonedMap[otherLayer]; + + auto outputSlot = thisLayer->BeginOutputSlots(); + for (auto&& otherOutputSlot : otherLayer->GetOutputSlots()) + { + for (auto&& otherInputSlot : otherOutputSlot.GetConnections()) + { + const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer(); + Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer]; + + InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex()); + outputSlot->Connect(inputSlot); + } + outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo()); + ++outputSlot; + } + } +} + +Status Graph::Print() const +{ + if (m_Layers.empty()) + { + BOOST_LOG_TRIVIAL(info) << "\n Graph is empty.\n"; + return Status::Success; + } + BOOST_LOG_TRIVIAL(info) << "\n"; + BOOST_LOG_TRIVIAL(info) << "Walking Pattern: \n"; + + for (auto&& it : TopologicalSort()) + { + BOOST_LOG_TRIVIAL(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType()) + << ":" << GetComputeDeviceAsCString(it->GetComputeDevice()); + } + BOOST_LOG_TRIVIAL(info) << "\n\n"; + + return Status::Success; +} + +Status Graph::AllocateDynamicBuffers() +{ + for (auto&& layer : m_Layers) + { + for (auto slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot) + { + slot->GetOutputHandler().AllocateTensors(); + } + } + return Status::Success; +} + +const Graph& Graph::TopologicalSort() const +{ + if (!m_LayersInOrder) + { + //Reset layer order + for (auto&& it : m_Layers) + { + it->ResetPriority(); + } + + auto compareLayerPriority = [](const LayersList::value_type& layerA, const LayersList::value_type& layerB) + { + return layerA->GetPriority() < layerB->GetPriority(); + }; + + m_Layers.sort(compareLayerPriority); + + m_LayersInOrder = true; + } + + return *this; +} + +void Graph::AddCopyLayers() +{ + // Returns true if the given layer could potentially need an intermediate copy layer (depending on its + // connections to other layers). 
At the time of writing, copy layers will be inserted in the following situations: + // CPU -> CL (and viceversa) + // CPU -> Neon (and viceversa) + auto MayNeedCopyLayer = [](const Layer& layer) + { + // All layers should have been associated with a valid compute device at this point + BOOST_ASSERT(layer.GetComputeDevice() != Compute::Undefined); + // Do not need another copy layer if copy layer is already present + return layer.GetType() != LayerType::MemCopy; + }; + + for (auto&& srcLayer : m_Layers) + { + if (MayNeedCopyLayer(*srcLayer)) + { + unsigned int srcOutputIndex = 0; + for (auto&& srcOutput : srcLayer->GetOutputSlots()) + { + for (auto&& dstInput : srcOutput.GetConnections()) + { + Layer& dstLayer = dstInput->GetOwningLayer(); + + if (MayNeedCopyLayer(dstLayer) && (dstLayer.GetComputeDevice() != srcLayer->GetComputeDevice())) + { + // A copy layer is needed in between the source and destination layers + // Record the operation rather than attempting to modify the graph as we go + // (invalidating iterators) + const std::string copyLayerName = boost::str(boost::format("[ %1% (%2%) -> %3% (%4%) ]") + % srcLayer->GetName() + % srcOutputIndex + % dstLayer.GetName() + % dstInput->GetSlotIndex()); + + MemCopyLayer* const copyLayer = InsertNewLayer<MemCopyLayer>(*dstInput, copyLayerName.c_str()); + copyLayer->SetComputeDevice(dstLayer.GetComputeDevice()); + } + } + ++srcOutputIndex; + } + } + } +} + +void Graph::InferTensorInfos() +{ + for (auto&& layer : TopologicalSort()) + { + for (auto&& input : layer->GetInputSlots()) + { + boost::ignore_unused(input); + BOOST_ASSERT_MSG(input.GetConnectedOutputSlot()->IsTensorInfoSet(), + "All inputs must have the TensorInfo set at this point."); + } + layer->ValidateTensorShapesFromInputs(); + } +} + +} // namespace armnn diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp new file mode 100644 index 0000000000..8888034197 --- /dev/null +++ b/src/armnn/Graph.hpp @@ -0,0 +1,315 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include "Layers.hpp" + +#include <armnn/Types.hpp> +#include <armnn/TensorFwd.hpp> +#include <armnn/NetworkFwd.hpp> +#include <armnn/Exceptions.hpp> + +#include <list> +#include <unordered_map> +#include <unordered_set> +#include <vector> + +#include <boost/assert.hpp> +#include <boost/iterator/transform_iterator.hpp> + +namespace armnn +{ +class Graph +{ +public: + template <typename CVLayerT> + static CVLayerT* PtrCast(Layer* const layer) + { + return boost::polymorphic_downcast<CVLayerT*>(layer); + } + + using LayersList = std::list<Layer*>; + using Iterator = LayersList::const_iterator; // const so pointers in the list can't be modified externally + using ConstIterator = boost::transform_iterator<decltype(&PtrCast<const Layer>), Iterator>; + using IteratorDifference = Iterator::difference_type; + + using ConstIteratorInputs = boost::transform_iterator<decltype(&PtrCast<const InputLayer>), Iterator>; + using ConstIteratorOutputs = boost::transform_iterator<decltype(&PtrCast<const OutputLayer>), Iterator>; + + /// Wrapper class returned by Graph::GetInputLayers() + struct InputLayersAccessor + { + explicit InputLayersAccessor(const Graph& graph) : m_Graph(graph) {} + + ConstIteratorInputs begin() const + { + return { m_Graph.m_Layers.begin(), &PtrCast<const InputLayer> }; + } + + ConstIteratorInputs end() const + { + return { std::next(m_Graph.m_Layers.begin(), static_cast<IteratorDifference>(m_Graph.GetNumInputs())), + &PtrCast<const InputLayer> }; + } + + const Graph& m_Graph; + }; + + /// Wrapper class returned by Graph::GetOutputLayers() + struct OutputLayersAccessor + { + explicit OutputLayersAccessor(const Graph& graph) : m_Graph(graph) {} + + ConstIteratorOutputs begin() const + { + return { std::prev(m_Graph.m_Layers.end(), static_cast<IteratorDifference>(m_Graph.GetNumOutputs())), + &PtrCast<const OutputLayer> }; + } + + ConstIteratorOutputs end() const + { + return { m_Graph.m_Layers.end(), &PtrCast<const OutputLayer> }; + } + + const Graph& m_Graph; + }; + + Graph() : m_LayersInOrder(true) {} + + Graph(const Graph& other); + + Graph& operator=(const Graph& other) = delete; + + ~Graph() + { + for (auto&& layer : m_Layers) + { + delete layer; + } + } + + Status Print() const; + + /// Adds a new layer of type LaterType to the graph constructed with the arguments passed. + template <typename LayerT, typename... Args> + LayerT* AddLayer(Args&&... args); + + /// Inserts a new layer between the output slot currently connected to insertBefore + /// and insertBefore itself. + template <typename LayerT, typename... Args> + LayerT* InsertNewLayer(InputSlot& insertBefore, Args&&... args); + + /// Deletes the layer at the specified position and returns an iterator pointing + /// to the next element after the one being deleted. + Iterator EraseLayer(Iterator pos); + + /// Deletes the layer and returns an iterator pointing to the next layer in the graph + /// (next in the list, after the one being deleted). Sets @a layer to nullptr on return. + /// Templated to support pointers to any layer type. + template <typename LayerT> + Iterator EraseLayer(LayerT*& layer); + + /// Return iterator pointing to begin of list. Lowercase for range-based for loops. + Iterator begin() { return m_Layers.begin(); } + /// Return iterator pointing to end of list. Lowercase for range-based for loops. + Iterator end() { return m_Layers.end(); } + + /// Return const iterator pointing to begin of list. Lowercase for range-based for loops. 
+ ConstIterator begin() const { return {m_Layers.begin(), &PtrCast<const Layer>}; } + /// Return const iterator pointing to end of list. Lowercase for range-based for loops. + ConstIterator end() const { return {m_Layers.end(), &PtrCast<const Layer>}; } + + /// Sort layers in topological order and return this. + Graph& TopologicalSort() { const_cast<const Graph*>(this)->TopologicalSort(); return *this; } + const Graph& TopologicalSort() const; + + size_t GetNumInputs() const { return m_InputIds.size(); } + size_t GetNumOutputs() const { return m_OutputIds.size(); } + + /// Returns a wrapper object with begin(), end() methods to iterate over the input layers + /// in a range-based for loop + InputLayersAccessor GetInputLayers() const { return InputLayersAccessor(*this); } + + /// Returns a wrapper object with begin(), end() methods to iterate over the output layers + /// in a range-based for loop + OutputLayersAccessor GetOutputLayers() const { return OutputLayersAccessor(*this); } + + size_t GetNumLayers() const { return m_Layers.size(); } + + /// Allocate memory for all tensors under output tensor handers of each layer + Status AllocateDynamicBuffers(); + + /// Modifies the graph in-place, removing edges connecting layers using different compute devices, + /// and relinking them via an intermediary copy layers. + void AddCopyLayers(); + + void InferTensorInfos(); + +private: + template <typename LayerT> + class LayerInGraphBase; + + template <typename LayerT> + class LayerInGraph; + + /// Get the position of a layer in the graph. + Iterator GetPosInGraph(Layer& layer); + + /// Adds a new layer of type LaterType to the graph constructed with the arguments passed. + template <typename LayerT, typename... Args> + LayerInGraph<LayerT>* AddLayerImpl(Iterator insertBefore, Args&&... args); + + std::unordered_set<LayerBindingId> m_InputIds; + std::unordered_set<LayerBindingId> m_OutputIds; + std::unordered_map<const Layer*, Iterator> m_PosInGraphMap; + + /// Mutable to allow sorting on const object. + mutable LayersList m_Layers; + mutable bool m_LayersInOrder; +}; + +/// Common base class for layers in the graph +template <typename LayerT> +class Graph::LayerInGraphBase : public LayerT +{ +protected: + template <typename... Args> + LayerInGraphBase(Graph& graph, Iterator insertBefore, Args&&... args) + : LayerT(std::forward<Args>(args)...), m_Graph(graph) + { + m_Graph.m_PosInGraphMap.emplace(this, m_Graph.m_Layers.emplace(insertBefore, this)); + } + ~LayerInGraphBase() + { + const size_t numErased = m_Graph.m_PosInGraphMap.erase(this); + boost::ignore_unused(numErased); + BOOST_ASSERT(numErased == 1); + } + + Graph& m_Graph; +}; + +/// Input/Output layers specialize this template +template <typename LayerT> +class Graph::LayerInGraph final : public LayerInGraphBase<LayerT> +{ +public: + template <typename... Args> + LayerInGraph(Graph& graph, Iterator insertBefore, Args&&... args) + : LayerInGraphBase<LayerT>(graph, insertBefore, std::forward<Args>(args)...) + { + } +}; + +/// Inputs add/remove their binding id to m_InputIds in the graph. +template <> +class Graph::LayerInGraph<InputLayer> final : public LayerInGraphBase<InputLayer> +{ +public: + template <typename... Args> + LayerInGraph(Graph& graph, Iterator insertBefore, Args&&... args) + : LayerInGraphBase<InputLayer>(graph, insertBefore, std::forward<Args>(args)...) 
+ { + const bool isNewId = m_Graph.m_InputIds.emplace(GetBindingId()).second; + if (!isNewId) + { + throw InvalidArgumentException("A layer already exists with the specified id"); + } + } + ~LayerInGraph() override + { + const size_t numErased = m_Graph.m_InputIds.erase(GetBindingId()); + boost::ignore_unused(numErased); + BOOST_ASSERT(numErased == 1); + } +}; + +/// Outputs add/remove their binding id to m_OutputIds in the graph. +template <> +class Graph::LayerInGraph<OutputLayer> final : public LayerInGraphBase<OutputLayer> +{ +public: + template <typename... Args> + LayerInGraph(Graph& graph, Iterator insertBefore, Args&&... args) + : LayerInGraphBase<OutputLayer>(graph, insertBefore, std::forward<Args>(args)...) + { + const bool isNewId = m_Graph.m_OutputIds.emplace(GetBindingId()).second; + if (!isNewId) + { + throw InvalidArgumentException("A layer already exists with the specified id"); + } + } + ~LayerInGraph() override + { + const size_t numErased = m_Graph.m_OutputIds.erase(GetBindingId()); + boost::ignore_unused(numErased); + BOOST_ASSERT(numErased == 1); + } +}; + +inline Graph::Iterator Graph::GetPosInGraph(Layer& layer) +{ + auto it = m_PosInGraphMap.find(&layer); + BOOST_ASSERT(it != m_PosInGraphMap.end()); + return it->second; +} + +template <typename LayerT, typename... Args> +inline Graph::LayerInGraph<LayerT>* Graph::AddLayerImpl(Iterator insertBefore, Args&&... args) +{ + return new LayerInGraph<LayerT>(*this, insertBefore, std::forward<Args>(args)...); +} + +/// Inputs are inserted at the front of the list, to keep the order correct if the list is sorted. +/// Outputs are inserted at the back of the list, to keep the order correct if the list is sorted. +/// Other layers are inserted before existing outputs, so the latter remain at the back of the list. +template <typename LayerT, typename... Args> +inline LayerT* Graph::AddLayer(Args&&... args) +{ + switch (LayerEnumOf<LayerT>()) + { + case LayerType::Input: + { + return AddLayerImpl<LayerT>(begin(), std::forward<Args>(args)...); + } + case LayerType::Output: + { + return AddLayerImpl<LayerT>(end(), std::forward<Args>(args)...); + } + default: + { + m_LayersInOrder = false; + const auto pos = std::prev(end(), IteratorDifference(GetNumOutputs())); + return AddLayerImpl<LayerT>(pos, std::forward<Args>(args)...); + } + } +} + +template <typename LayerT, typename... Args> +inline LayerT* Graph::InsertNewLayer(InputSlot& insertBefore, Args&&... args) +{ + // Insert before the child layer so topological order is kept. + const Iterator pos = GetPosInGraph(insertBefore.GetOwningLayer()); + LayerT* const layer = AddLayerImpl<LayerT>(pos, std::forward<Args>(args)...); + insertBefore.Insert(*layer); + return layer; +} + +inline Graph::Iterator Graph::EraseLayer(Iterator pos) +{ + delete *pos; + return m_Layers.erase(pos); +} + +template <typename LayerT> +inline Graph::Iterator Graph::EraseLayer(LayerT*& layer) +{ + BOOST_ASSERT(layer != nullptr); + Iterator next = EraseLayer(GetPosInGraph(*layer)); + layer = nullptr; + return next; +} + +} // namespace armnn diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp new file mode 100644 index 0000000000..e39b15be05 --- /dev/null +++ b/src/armnn/InternalTypes.cpp @@ -0,0 +1,45 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "InternalTypes.hpp" + +#include <boost/assert.hpp> + +namespace armnn +{ + +char const* GetLayerTypeAsCString(LayerType type) +{ + switch (type) + { + case LayerType::Activation: return "Activation"; + case LayerType::Addition: return "Addition"; + case LayerType::BatchNormalization: return "BatchNormalization"; + case LayerType::Constant: return "Constant"; + case LayerType::Convolution2d: return "Convolution2d"; + case LayerType::DepthwiseConvolution2d: return "DepthwiseConvolution2d"; + case LayerType::FakeQuantization: return "FakeQuantization"; + case LayerType::Floor: return "Floor"; + case LayerType::FullyConnected: return "FullyConnected"; + case LayerType::Input: return "Input"; + case LayerType::L2Normalization: return "L2Normalization"; + case LayerType::MemCopy: return "MemCopy"; + case LayerType::Merger: return "Merger"; + case LayerType::Multiplication: return "Multiplication"; + case LayerType::Normalization: return "Normalization"; + case LayerType::Output: return "Output"; + case LayerType::Permute: return "Permute"; + case LayerType::Pooling2d: return "Pooling2d"; + case LayerType::Reshape: return "Reshape"; + case LayerType::ResizeBilinear: return "ResizeBilinear"; + case LayerType::Softmax: return "Softmax"; + case LayerType::Splitter: return "Splitter"; + default: + BOOST_ASSERT_MSG(false, "Unknown layer type"); + return "Unknown"; + } +} + +} diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp new file mode 100644 index 0000000000..8db0da4cf2 --- /dev/null +++ b/src/armnn/InternalTypes.hpp @@ -0,0 +1,48 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/Types.hpp> + +#include <array> + +namespace armnn +{ + +enum class LayerType +{ + FirstLayer, + Activation = FirstLayer, + Addition, + BatchNormalization, + Constant, + Convolution2d, + DepthwiseConvolution2d, + FakeQuantization, + Floor, + FullyConnected, + Input, + L2Normalization, + MemCopy, + Merger, + Multiplication, + Normalization, + Output, + Permute, + Pooling2d, + Reshape, + ResizeBilinear, + Softmax, + // Last layer goes here + LastLayer, + Splitter = LastLayer, +}; + +const char* GetLayerTypeAsCString(LayerType type); + +using Coordinates = std::array<unsigned int, MaxNumOfTensorDimensions>; +using Dimensions = std::array<unsigned int, MaxNumOfTensorDimensions>; + +} diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp new file mode 100644 index 0000000000..20a8ba4926 --- /dev/null +++ b/src/armnn/Layer.cpp @@ -0,0 +1,220 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "Layer.hpp" + +#include "Graph.hpp" +#include "backends/WorkloadData.hpp" + +#include <boost/cast.hpp> +#include <boost/format.hpp> +#include <boost/log/trivial.hpp> + +#include <numeric> + +namespace armnn +{ + +void InputSlot::Insert(Layer& layer) +{ + BOOST_ASSERT(layer.GetNumInputSlots() <= 1); + BOOST_ASSERT(layer.GetNumOutputSlots() == 1); + + OutputSlot* const prevSlot = GetConnectedOutputSlot(); + + if (prevSlot != nullptr) + { + // Disconnect parent from this + prevSlot->Disconnect(*this); + + // Connect inserted layer to parent + BOOST_ASSERT(layer.GetNumInputSlots() == 1); + prevSlot->Connect(layer.GetInputSlot(0)); + + // Set tensor info for inserted layer + const TensorInfo& tensorInfo = prevSlot->GetTensorInfo(); + layer.GetOutputHandler().SetTensorInfo(tensorInfo); + } + + // Connect inserted layer to this + layer.GetOutputSlot(0).Connect(*this); +} + +const InputSlot* OutputSlot::GetConnection(unsigned int index) const +{ + ValidateConnectionIndex(index); + return m_Connections[index]; +} + +InputSlot* OutputSlot::GetConnection(unsigned int index) +{ + ValidateConnectionIndex(index); + return m_Connections[index]; +} + +void OutputSlot::SetTensorInfo(const TensorInfo& tensorInfo) +{ + GetOutputHandler().SetTensorInfo(tensorInfo); +} + +const TensorInfo& OutputSlot::GetTensorInfo() const +{ + return GetOutputHandler().GetTensorInfo(); +} + +bool OutputSlot::IsTensorInfoSet() const +{ + return GetOutputHandler().IsTensorInfoSet(); +} + +bool OutputSlot::ValidateTensorShape(const TensorShape& shape) const +{ + BOOST_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape."); + return shape == m_OutputHandler.GetTensorInfo().GetShape(); +} + +int OutputSlot::Connect(InputSlot& destination) +{ + destination.SetConnection(this); + m_Connections.push_back(&destination); + return boost::numeric_cast<int>(m_Connections.size() - 1); +} + +void OutputSlot::Disconnect(InputSlot& slot) +{ + slot.SetConnection(nullptr); + m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end()); +} + +void OutputSlot::DisconnectAll() +{ + while (GetNumConnections() > 0) + { + InputSlot& connection = *GetConnection(0); + Disconnect(connection); + } +} + +void OutputSlot::MoveAllConnections(OutputSlot& destination) +{ + while (GetNumConnections() > 0) + { + InputSlot& connection = *GetConnection(0); + Disconnect(connection); + destination.Connect(connection); + } +} + +void OutputSlot::ValidateConnectionIndex(unsigned int index) const +{ + if (boost::numeric_cast<std::size_t>(index) >= m_Connections.size()) + { + throw InvalidArgumentException( + boost::str(boost::format("GetConnection: Invalid index %1% provided") % index)); + } +} + +Layer::Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name) +: m_OutputHandlers(numOutputSlots) +, m_LayerName(name ? 
name : "") +, m_Type(type) +, m_ComputeDevice(Compute::Undefined) +{ + m_InputSlots.reserve(numInputSlots); + for (unsigned int i = 0; i < numInputSlots; ++i) + { + m_InputSlots.emplace_back(*this, i); + } + + m_OutputSlots.reserve(numOutputSlots); + for (unsigned int i = 0; i < numOutputSlots; ++i) + { + m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]); + } +} + +void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector, const Graph& graph) const +{ + for (auto&& inputSlot : GetInputSlots()) + { + // The graph must be well-formed at this point + BOOST_ASSERT(inputSlot.GetConnection()); + const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler(); + dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo()); + } +} + +void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector, const Graph& graph) const +{ + for (auto&& outputHandler : m_OutputHandlers) + { + outputHandler.CollectWorkloadOutputs(dataCollector); + } +} + +void Layer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) +{ + for (auto&& outputHandler : m_OutputHandlers) + { + outputHandler.CreateTensorHandles(factory); + } +} + +DataType Layer::GetDataType() const +{ + if (GetNumInputSlots() > 0) // Ignore the input layer + { + return GetInputSlot(0).GetConnection()->GetTensorInfo().GetDataType(); + } + return DataType::Float32; +} + +void Layer::ResetPriority() const +{ + m_Priority = 0; + m_Visiting = false; +} + +LayerPriority Layer::GetPriority() const +{ + constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest(); + constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max(); + + if (GetType() == LayerType::Input) + { + m_Priority = inputPrio; + } + else if (GetType() == LayerType::Output) + { + m_Priority = outputPrio; + } + else if (m_Priority == 0) + { + if (m_Visiting) + { + throw GraphValidationException("Graph has circular dependencies: cannot walk"); + } + + auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority + { + const Layer& input = slot.GetConnectedOutputSlot()->GetOwningLayer(); + return std::max(prio, input.GetPriority()); + }; + + m_Visiting = true; + LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio); + m_Visiting = false; + + if (parentPrio >= outputPrio) + { + throw GraphValidationException("Graph has too many edges"); + } + + m_Priority = parentPrio + 1U; + } + + return m_Priority; +} + +} // namespace armnn diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp new file mode 100644 index 0000000000..1160f0ab09 --- /dev/null +++ b/src/armnn/Layer.hpp @@ -0,0 +1,309 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include "LayerFwd.hpp" + +#include "backends/OutputHandler.hpp" +#include "backends/WorkloadDataCollector.hpp" +#include "backends/WorkloadInfo.hpp" +#include "InternalTypes.hpp" + +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> +#include <armnn/INetwork.hpp> + +#include <algorithm> +#include <memory> +#include <string> +#include <vector> + +#include <boost/numeric/conversion/cast.hpp> +#include <boost/core/ignore_unused.hpp> +#include <boost/cast.hpp> + +namespace armnn +{ + +class IWorkload; +class IWorkloadFactory; +class Layer; +class Graph; + +class InputSlot final : public IInputSlot +{ +public: + explicit InputSlot(Layer& owner, unsigned int slotIndex) + : m_OwningLayer(owner) + , m_Connection(nullptr) + , m_SlotIndex(slotIndex) + {} + + ~InputSlot(); + + Layer& GetOwningLayer() const { return m_OwningLayer; } + unsigned int GetSlotIndex() const { return m_SlotIndex; } + + const OutputSlot* GetConnectedOutputSlot() const { return m_Connection; } + OutputSlot* GetConnectedOutputSlot() { return m_Connection; } + + /// Links the slot to an output slot or breaks an existing link if passing nullptr + void SetConnection(OutputSlot* source) + { + if (m_Connection != nullptr && source != nullptr) + { + throw InvalidArgumentException("Tried to connect an output slot to an input slot, " + "but the latter already has a connection"); + } + m_Connection = source; + } + + // Insert single-output existing layer at this point in the graph. + void Insert(Layer& layer); + + // IInputSlot + + const IOutputSlot* GetConnection() const override; + IOutputSlot* GetConnection() override; + +private: + Layer& m_OwningLayer; + OutputSlot* m_Connection; + const unsigned int m_SlotIndex; +}; + +class OutputSlot final : public IOutputSlot +{ +public: + explicit OutputSlot(Layer& owner, OutputHandler& outputHandler) + : m_OwningLayer(owner) + , m_OutputHandler(outputHandler) + {} + + ~OutputSlot() + { + DisconnectAll(); + } + + Layer& GetOwningLayer() const { return m_OwningLayer; } + + const OutputHandler& GetOutputHandler() const { return m_OutputHandler; } + OutputHandler& GetOutputHandler() { return m_OutputHandler; } + + int Connect(InputSlot& destination); + void Disconnect(InputSlot& slot); + + const std::vector<InputSlot*>& GetConnections() const { return m_Connections; } + + bool ValidateTensorShape(const TensorShape& shape) const; + + // Disconnect all conections + void DisconnectAll(); + + /// Move all connections to another OutputSlot + void MoveAllConnections(OutputSlot& destination); + + // IOutputSlot + + unsigned int GetNumConnections() const override { return boost::numeric_cast<unsigned int>(m_Connections.size()); } + const InputSlot* GetConnection(unsigned int index) const override; + InputSlot* GetConnection(unsigned int index) override; + + void SetTensorInfo(const TensorInfo& tensorInfo) override; + const TensorInfo& GetTensorInfo() const override; + bool IsTensorInfoSet() const override; + + int Connect(IInputSlot& destination) override + { + return Connect(*boost::polymorphic_downcast<InputSlot*>(&destination)); + } + + void Disconnect(IInputSlot& slot) override + { + return Disconnect(*boost::polymorphic_downcast<InputSlot*>(&slot)); + } + +private: + void ValidateConnectionIndex(unsigned int index) const; + + Layer& m_OwningLayer; + OutputHandler& m_OutputHandler; + std::vector<InputSlot*> m_Connections; +}; + +// InputSlot inlines that need OutputSlot declaration + +inline InputSlot::~InputSlot() +{ + if (m_Connection != nullptr) + { + 
m_Connection->Disconnect(*this); + } +} + +inline const IOutputSlot* InputSlot::GetConnection() const { return GetConnectedOutputSlot(); } +inline IOutputSlot* InputSlot::GetConnection() { return GetConnectedOutputSlot(); } + +// Base layer class + +using LayerPriority = unsigned int; + +class Layer : public IConnectableLayer +{ +public: + /// @param name Optional name for the layer (may be nullptr) + Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name); + + const std::string& GetNameStr() const + { + return m_LayerName; + } + + const OutputHandler& GetOutputHandler(unsigned int i = 0) const + { + return m_OutputHandlers[i]; + } + + OutputHandler& GetOutputHandler(unsigned int i = 0) + { + return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i)); + } + + const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; } + const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; } + + // Allow non-const access to input slots, but don't expose vector (vector size is fixed at layer construction). + std::vector<InputSlot>::iterator BeginInputSlots() { return m_InputSlots.begin(); } + std::vector<InputSlot>::iterator EndInputSlots() { return m_InputSlots.end(); } + + // Allow non-const access to output slots, but don't expose vector (vector size is fixed at layer construction). + std::vector<OutputSlot>::iterator BeginOutputSlots() { return m_OutputSlots.begin(); } + std::vector<OutputSlot>::iterator EndOutputSlots() { return m_OutputSlots.end(); } + + // Check whether the outputs of this layer don't have any connection + bool IsOutputUnconnected() + { + unsigned int numConnections = 0; + + for (auto&& output : GetOutputSlots()) + { + numConnections += output.GetNumConnections(); + } + + return (GetNumOutputSlots() > 0) && (numConnections == 0); + } + + // Used for sorting + void ResetPriority() const; + LayerPriority GetPriority() const; + + LayerType GetType() const { return m_Type; } + + DataType GetDataType() const; + + Compute GetComputeDevice() const { return m_ComputeDevice; } + void SetComputeDevice(Compute device) { m_ComputeDevice = device; } + + // Virtuals + + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0; + + virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory); + + /// Creates a dynamically-allocated copy of this layer + /// @param graph The Graph into which this Layer is being cloned + virtual Layer* Clone(Graph& graph) const = 0; + + virtual void ValidateTensorShapesFromInputs() = 0; + + // IConnectableLayer + + const char* GetName() const override { return m_LayerName.c_str(); } + + unsigned int GetNumInputSlots() const override { return static_cast<unsigned int>(m_InputSlots.size()); } + unsigned int GetNumOutputSlots() const override { return static_cast<unsigned int>(m_OutputSlots.size()); } + + const InputSlot& GetInputSlot(unsigned int index) const override { return m_InputSlots.at(index); } + InputSlot& GetInputSlot(unsigned int index) override { return m_InputSlots.at(index); } + const OutputSlot& GetOutputSlot(unsigned int index = 0) const override { return m_OutputSlots.at(index); } + OutputSlot& GetOutputSlot(unsigned int index = 0) override { return m_OutputSlots.at(index); } + +protected: + // Graph needs access to the virtual destructor + friend class Graph; + virtual ~Layer() = default; + + template <typename QueueDescriptor> + void 
CollectQueueDescriptorInputs(QueueDescriptor& descriptor, WorkloadInfo& info, const Graph& graph) const + { + WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos); + CollectWorkloadInputs(dataCollector, graph); + } + + template <typename QueueDescriptor> + void CollectQueueDescriptorOutputs(QueueDescriptor& descriptor, WorkloadInfo& info, const Graph& graph) const + { + WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos); + CollectWorkloadOutputs(dataCollector, graph); + } + + /// Helper function to reduce duplication in *Layer::CreateWorkload + template <typename QueueDescriptor> + WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor, const Graph& graph) const + { + WorkloadInfo info; + CollectQueueDescriptorInputs(descriptor, info, graph); + CollectQueueDescriptorOutputs(descriptor, info, graph); + return info; + } + + template <typename LayerType, typename ... Params> + LayerType* CloneBase(Graph& graph, Params&& ... params) const; + +private: + void CollectWorkloadInputs(WorkloadDataCollector& dataCollector, const Graph& graph) const; + void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector, const Graph& graph) const; + +protected: + std::vector<OutputHandler> m_OutputHandlers; + +private: + const std::string m_LayerName; + + std::vector<InputSlot> m_InputSlots; + std::vector<OutputSlot> m_OutputSlots; + + const LayerType m_Type; + Compute m_ComputeDevice; + + /// Used for sorting + mutable LayerPriority m_Priority = 0; + mutable bool m_Visiting = false; +}; + +// A layer user-provided data can be bound to (e.g. inputs, outputs) +class BindableLayer : public Layer +{ +public: + BindableLayer(unsigned int numInputSlots, + unsigned int numOutputSlots, + LayerType type, + const char* name, + LayerBindingId id) + : Layer(numInputSlots, numOutputSlots, type, name) + , m_Id(id) + { + } + + LayerBindingId GetBindingId() const { return m_Id; }; + +protected: + ~BindableLayer() = default; + +private: + LayerBindingId m_Id; +}; + +} diff --git a/src/armnn/LayerFwd.hpp b/src/armnn/LayerFwd.hpp new file mode 100644 index 0000000000..c2f6c7363d --- /dev/null +++ b/src/armnn/LayerFwd.hpp @@ -0,0 +1,13 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ + +class BindableLayer; +class Layer; + +} diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp new file mode 100644 index 0000000000..0567b94905 --- /dev/null +++ b/src/armnn/LayerSupport.cpp @@ -0,0 +1,260 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "armnn/LayerSupport.hpp" + +#include "backends/RefLayerSupport.hpp" +#include "backends/NeonLayerSupport.hpp" +#include "backends/ClLayerSupport.hpp" + +#include <boost/assert.hpp> + +#include <cstring> +#include <algorithm> + +namespace armnn +{ + +// Helper function to copy a full string to a truncated version +void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength) +{ + if(truncatedString != nullptr) + { + size_t copyLength = std::min(maxLength, strlen(fullString)); + std::strncpy(truncatedString, fullString, copyLength); + // Ensure null-terminated string + truncatedString[copyLength] = '\0'; + } +} + +// Helper macro to avoid code duplication. 
+// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute +#define FORWARD_LAYER_SUPPORT_FUNC(compute, func, ...) \ + std::string reasonIfUnsupportedFull; \ + bool isSupported; \ + switch(compute) \ + { \ + case Compute::CpuRef: \ + isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \ + break; \ + case Compute::CpuAcc: \ + isSupported = func##Neon(__VA_ARGS__, &reasonIfUnsupportedFull); \ + break; \ + case Compute::GpuAcc: \ + isSupported = func##Cl(__VA_ARGS__, &reasonIfUnsupportedFull); \ + break; \ + default: \ + isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \ + break; \ + } \ + CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \ + return isSupported; + +bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1) +{ + return input0.GetDataType() == input1.GetDataType(); +} + +bool IsActivationSupported(Compute compute, + const TensorInfo& input, + const ActivationDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsActivationSupported, input, descriptor); +} + +bool IsAdditionSupported(Compute compute, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + if(!CheckTensorDataTypesEqual(input0, input1)) + { + return false; + } + + FORWARD_LAYER_SUPPORT_FUNC(compute, IsAdditionSupported, input0, input1, output); +} + +bool IsBatchNormalizationSupported(Compute compute, + const TensorInfo& input, + const BatchNormalizationDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsBatchNormalizationSupported, input, descriptor); +} + +bool IsConstantSupported(Compute compute, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsConstantSupported, output); +} + +bool IsConvolution2dSupported(Compute compute, + const TensorInfo& input, + const Convolution2dDescriptor& descriptor, + const TensorInfo& weights, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvolution2dSupported, input, descriptor, weights); +} + +bool IsDepthwiseConvolutionSupported(Compute compute, + const TensorInfo& input, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsDepthwiseConvolutionSupported, input, descriptor, weights); +} + +bool IsInputSupported(Compute compute, + const TensorInfo& input, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsInputSupported, input); +} + +bool IsFullyConnectedSupported(Compute compute, + const TensorInfo& input, + const FullyConnectedDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsFullyConnectedSupported, input, descriptor); +} + +bool IsL2NormalizationSupported(Compute compute, + const TensorInfo& input, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsL2NormalizationSupported, input); +} + +bool IsMergerSupported(Compute compute, + std::vector<const TensorInfo*> inputs, + const 
OriginsDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + BOOST_ASSERT(inputs.size() > 0); + FORWARD_LAYER_SUPPORT_FUNC(compute, IsMergerSupported, inputs, descriptor); +} + +bool IsMultiplicationSupported(Compute compute, + const TensorInfo& input0, + const TensorInfo& input1, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsMultiplicationSupported, input0, input1); +} + +bool IsNormalizationSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsNormalizationSupported, input, output, descriptor); +} + +bool IsOutputSupported(Compute compute, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsOutputSupported, output); +} + +bool IsPermuteSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsPermuteSupported, input, output, descriptor); +} + +bool IsPooling2dSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsPooling2dSupported, input, output, descriptor); +} + +bool IsResizeBilinearSupported(Compute compute, + const TensorInfo& input, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsResizeBilinearSupported, input); +} + +bool IsSoftmaxSupported(Compute compute, + const TensorInfo& input, + const SoftmaxDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsSoftmaxSupported, input, descriptor); +} + +bool IsSplitterSupported(Compute compute, + const TensorInfo& input, + const ViewsDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsSplitterSupported, input, descriptor); +} + +bool IsFakeQuantizationSupported(Compute compute, + const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsFakeQuantizationSupported, input, descriptor); +} + +bool IsReshapeSupported(Compute compute, + const TensorInfo& input, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsReshapeSupported, input); +} + +bool IsFloorSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + // By definition (that is, regardless of compute device), shapes and data type must match + if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType()) + { + return false; + } + + FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output); +} + +} diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp new file mode 100644 index 0000000000..5b7feac387 --- /dev/null +++ b/src/armnn/LayerSupportCommon.hpp @@ -0,0 +1,64 @@ +// +// Copyright © 2017 Arm Ltd. 
All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/DescriptorsFwd.hpp> +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +template<typename Float32Func, typename Uint8Func, typename ... Params> +bool IsSupportedForDataTypeGeneric(std::string* reasonIfUnsupported, + DataType dataType, + Float32Func floatFuncPtr, + Uint8Func uint8FuncPtr, + Params&&... params) +{ + switch(dataType) + { + case DataType::Float32: + return floatFuncPtr(reasonIfUnsupported, std::forward<Params>(params)...); + case DataType::QuantisedAsymm8: + return uint8FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...); + default: + return false; + } +} + +template<typename ... Params> +bool TrueFunc(std::string* reasonIfUnsupported, Params&&... params) +{ + return true; +} + +template<typename ... Params> +bool FalseFunc(std::string* reasonIfUnsupported, Params&&... params) +{ + return false; +} + +template<typename ... Params> +bool FalseFuncF32(std::string* reasonIfUnsupported, Params&&... params) +{ + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Layer is not supported with float32 data type"; + } + return false; +} + +template<typename ... Params> +bool FalseFuncU8(std::string* reasonIfUnsupported, Params&&... params) +{ + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Layer is not supported with 8-bit data type"; + } + return false; +} + +} diff --git a/src/armnn/Layers.cpp b/src/armnn/Layers.cpp new file mode 100644 index 0000000000..ddbc7d222c --- /dev/null +++ b/src/armnn/Layers.cpp @@ -0,0 +1,986 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "Layers.hpp" +#include "Graph.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/Workload.hpp" +#include "backends/WorkloadFactory.hpp" + +#include "Permute.hpp" + + +namespace armnn +{ + +template <typename LayerType, typename ... Params> +LayerType* Layer::CloneBase(Graph& graph, Params&& ... 
params) const +{ + LayerType* const layer = graph.AddLayer<LayerType>(std::forward<Params>(params)...); + + layer->SetComputeDevice(m_ComputeDevice); + + return layer; +} + +ActivationLayer::ActivationLayer(const ActivationDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Activation, param, name) +{ +} + +std::unique_ptr<IWorkload> ActivationLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + ActivationQueueDescriptor descriptor; + return factory.CreateActivation(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +ActivationLayer* ActivationLayer::Clone(Graph& graph) const +{ + return CloneBase<ActivationLayer>(graph, m_Param, GetName()); +} + +void ActivationLayer::ValidateTensorShapesFromInputs() +{ + auto& info = GetInputSlot(0).GetConnection()->GetTensorInfo(); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(info.GetShape()), + "ActivationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +AdditionLayer::AdditionLayer(const char* name) + : Layer(2, 1, LayerType::Addition, name) +{ +} + +std::unique_ptr<IWorkload> AdditionLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + AdditionQueueDescriptor descriptor; + return factory.CreateAddition(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +AdditionLayer* AdditionLayer::Clone(Graph& graph) const +{ + return CloneBase<AdditionLayer>(graph, GetName()); +} + +void AdditionLayer::ValidateTensorShapesFromInputs() +{ + auto& input0 = GetInputSlot(0).GetConnection()->GetTensorInfo(); + auto& input1 = GetInputSlot(1).GetConnection()->GetTensorInfo(); + + // Get the max of the inputs + BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions()); + unsigned int numDims = input0.GetNumDimensions(); + std::vector<unsigned int> dims(numDims); + + // validate inputs are broadcast compatible +#if !NDEBUG + for (unsigned int i = 0; i < numDims; i++) + { + unsigned int dim0 = input0.GetShape()[i]; + unsigned int dim1 = input1.GetShape()[i]; + if (dim0 != dim1) + { + BOOST_ASSERT_MSG(dim0 == 1 || dim1 == 1, "Dimensions should either match or one should be one length"); + } + } +#endif + + + for (unsigned int i = 0; i < numDims; i++) + { + unsigned int dim0 = input0.GetShape()[i]; + unsigned int dim1 = input1.GetShape()[i]; + dims[i] = std::max(dim0, dim1); + } + + TensorShape outShape(numDims, dims.data()); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "AdditionLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +BatchNormalizationLayer::BatchNormalizationLayer(const armnn::BatchNormalizationDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::BatchNormalization, param, name) +{ +} + +std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + BatchNormalizationQueueDescriptor descriptor; + + descriptor.m_Mean = m_Mean.get(); + descriptor.m_Variance = m_Variance.get(); + descriptor.m_Beta = m_Beta.get(); + descriptor.m_Gamma = m_Gamma.get(); + return factory.CreateBatchNormalization(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const +{ + auto layer = CloneBase<BatchNormalizationLayer>(graph, m_Param, GetName()); + + layer->m_Mean = m_Mean ? 
std::make_unique<ScopedCpuTensorHandle>(*m_Mean) : nullptr; + layer->m_Variance = m_Variance ? std::make_unique<ScopedCpuTensorHandle>(*m_Variance) : nullptr; + layer->m_Beta = m_Beta ? std::make_unique<ScopedCpuTensorHandle>(*m_Beta) : nullptr; + layer->m_Gamma = m_Gamma ? std::make_unique<ScopedCpuTensorHandle>(*m_Gamma) : nullptr; + + return std::move(layer); +} + +void BatchNormalizationLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "BatchNormalizationLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "BatchNormalizationLayer: TensorInfo must be set on connected OutputSlot."); + + auto& info = GetInputSlot(0).GetConnection()->GetTensorInfo(); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(info.GetShape()), + "BatchNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +Convolution2dLayer::Convolution2dLayer(const Convolution2dDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Convolution2d, param, name) +{ +} + +std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + Convolution2dQueueDescriptor descriptor; + + descriptor.m_Weight = m_Weight.get(); + if (m_Param.m_BiasEnabled) + { + descriptor.m_Bias = m_Bias.get(); + } + return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const +{ + auto layer = CloneBase<Convolution2dLayer>(graph, m_Param, GetName()); + layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr; + + if (layer->m_Param.m_BiasEnabled) + { + layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr; + } + + return std::move(layer); +} + +void Convolution2dLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "Convolution2dLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "Convolution2dLayer: TensorInfo must be set on connected OutputSlot."); + + + IOutputSlot* input = GetInputSlot(0).GetConnection(); + const TensorShape& inputShape = input->GetTensorInfo().GetShape(); + const TensorShape filterShape = m_Weight->GetTensorInfo().GetShape(); + + // If we support multiple batch dimensions in the future, then this assert will need to change. 
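// Note on the shape inference just below (illustrative figures, not part of this patch):
// outWidth = 1 + ((inWidth + m_PadLeft + m_PadRight) - filterWidth) / m_StrideX, using integer division,
// and the height dimension follows the same formula with the vertical parameters.
// For example, inWidth = 224, m_PadLeft = m_PadRight = 3, filterWidth = 7 and m_StrideX = 2 give
// readWidth = (224 + 3 + 3) - 7 = 223 and outWidth = 1 + 223 / 2 = 112.
// The batch size passes through unchanged and the channel count is the number of filters
// (filterShape[0]), hence the 4-D input assumption asserted immediately below.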
+ BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input."); + + unsigned int inWidth = inputShape[3]; + unsigned int inHeight = inputShape[2]; + unsigned int inBatchSize = inputShape[0]; + + unsigned int filterWidth = filterShape[3]; + unsigned int readWidth = (inWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - (filterWidth); + unsigned int outWidth = 1+(readWidth / m_Param.m_StrideX); + + unsigned int filterHeight = filterShape[2]; + unsigned int readHeight = (inHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - (filterHeight); + unsigned int outHeight = 1+(readHeight / m_Param.m_StrideY); + + unsigned int outChannels = filterShape[0]; + unsigned int outBatchSize = inBatchSize; + + TensorShape shapeOut({outBatchSize, outChannels, outHeight, outWidth}); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(shapeOut), + "Convolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + + +DepthwiseConvolution2dLayer::DepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor& param, + const char* name) + : LayerWithParameters(1, 1, LayerType::DepthwiseConvolution2d, param, name) +{ +} + +std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + DepthwiseConvolution2dQueueDescriptor descriptor; + + descriptor.m_Weight = m_Weight.get(); + if (m_Param.m_BiasEnabled) + { + descriptor.m_Bias = m_Bias.get(); + } + return factory.CreateDepthwiseConvolution2d(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) const +{ + auto layer = CloneBase<DepthwiseConvolution2dLayer>(graph, m_Param, GetName()); + layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr; + + if (layer->m_Param.m_BiasEnabled) + { + layer->m_Bias = m_Bias ? 
std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr; + } + + return std::move(layer); +} + +void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "DepthwiseConvolution2dLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "DepthwiseConvolution2dLayer: TensorInfo must be set on connected OutputSlot."); + + IOutputSlot* input = GetInputSlot(0).GetConnection(); + const TensorShape& inputShape = input->GetTensorInfo().GetShape(); + const TensorShape filterShape = m_Weight->GetTensorInfo().GetShape(); + + BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input."); + + unsigned int inWidth = inputShape[3]; + unsigned int inHeight = inputShape[2]; + unsigned int inBatchSize = inputShape[0]; + + unsigned int filterWidth = filterShape[3]; + unsigned int readWidth = (inWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - (filterWidth); + unsigned int outWidth = 1+(readWidth / m_Param.m_StrideX); + + unsigned int filterHeight = filterShape[2]; + unsigned int readHeight = (inHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - (filterHeight); + unsigned int outHeight = 1+(readHeight / m_Param.m_StrideY); + unsigned int depthMultiplier = filterShape[0]; + + unsigned int outChannels = filterShape[1]*depthMultiplier; + unsigned int outBatchSize = inBatchSize; + + TensorShape outShape({outBatchSize, outChannels, outHeight, outWidth}); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "DepthwiseConvolution2dLayer: " + "TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +FakeQuantizationLayer::FakeQuantizationLayer(const FakeQuantizationDescriptor& param, const char* name) +: LayerWithParameters(1, 1, LayerType::FakeQuantization, param, name) +{ +} + +std::unique_ptr<IWorkload> FakeQuantizationLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + FakeQuantizationQueueDescriptor descriptor; + return factory.CreateFakeQuantization(descriptor, PrepInfoAndDesc(descriptor, graph) ); +} + +FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const +{ + return CloneBase<FakeQuantizationLayer>(graph, m_Param, GetName()); +} + +void FakeQuantizationLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "FakeQuantizationLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "FakeQuantizationLayer: TensorInfo must be set on connected OutputSlot."); + + + IOutputSlot* input = GetInputSlot(0).GetConnection(); + + // input and output shapes are the same + TensorShape const& outShape = input->GetTensorInfo().GetShape(); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "FakeQuantizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +FloorLayer::FloorLayer(const char* name) + : Layer(1, 1, LayerType::Floor, name) +{ +} + +std::unique_ptr<IWorkload> FloorLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + FloorQueueDescriptor descriptor; + return factory.CreateFloor(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +FloorLayer* FloorLayer::Clone(Graph& graph) const +{ + 
return CloneBase<FloorLayer>(graph, GetName()); +} + +void FloorLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "FloorLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "FloorLayer: TensorInfo must be set on connected OutputSlot."); + + // input and output shapes are the same + IOutputSlot* input = GetInputSlot(0).GetConnection(); + TensorShape const& outShape = input->GetTensorInfo().GetShape(); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "FloorLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +FullyConnectedLayer::FullyConnectedLayer(const FullyConnectedDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::FullyConnected, param, name) +{ +} + +std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + FullyConnectedQueueDescriptor descriptor; + + descriptor.m_Weight = m_Weight.get(); + if (m_Param.m_BiasEnabled) + { + descriptor.m_Bias = m_Bias.get(); + } + return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const +{ + auto layer = CloneBase<FullyConnectedLayer>(graph, m_Param, GetName()); + + layer->m_Weight = m_Weight ? std::make_unique<ScopedCpuTensorHandle>(*m_Weight) : nullptr; + if (layer->m_Param.m_BiasEnabled) + { + layer->m_Bias = m_Bias ? std::make_unique<ScopedCpuTensorHandle>(*m_Bias) : nullptr; + } + + return std::move(layer); +} + +void FullyConnectedLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "FullyConnectedLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "FullyConnectedLayer: TensorInfo must be set on connected OutputSlot."); + + + TensorShape const& weightShape = m_Weight->GetTensorInfo().GetShape(); + + // output for FC is [1, w[1]] + unsigned int batches = GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()[0]; + unsigned int dimIdx = m_Param.m_TransposeWeightMatrix ? 0 : 1; + TensorShape outShape({batches, weightShape[dimIdx]}); + + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "FullyConnectedLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +InputLayer::InputLayer(LayerBindingId id, const char* name) + : BindableLayer(0, 1, LayerType::Input, name, id) +{ +} + +std::unique_ptr<IWorkload> InputLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + return nullptr; +} + +InputLayer* InputLayer::Clone(Graph& graph) const +{ + return CloneBase<InputLayer>(graph, GetBindingId(), GetName()); +} + +void InputLayer::ValidateTensorShapesFromInputs() +{ + //The input layer should already have it's inputs set during graph building phase in the driver/parser. 
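+ // An InputLayer is created with no input slots and a single output slot, so the only thing to
+ // verify here is that the output TensorInfo was provided when the network was constructed.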
+ ConditionalThrow<LayerValidationException>(GetOutputSlot(0).IsTensorInfoSet(), + "InputLayer should already have the TensorInfo set."); +} + + +MergerLayer::MergerLayer(const OriginsDescriptor& param, const char* name) + : LayerWithParameters(param.GetNumViews(), 1, LayerType::Merger, param, name) +{ +} + +std::unique_ptr<IWorkload> MergerLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + MergerQueueDescriptor descriptor; + + // copy the view origins to the descriptor + descriptor.m_ViewOrigins.reserve(m_Param.GetNumViews()); + for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i) + { + descriptor.m_ViewOrigins.emplace_back( + std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions())); + } + + return factory.CreateMerger(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) +{ + //if sub tensors are supported than the merger + //just needs to make sure that the outputs of the prev layer + //are made subtensors of the output of the merger layer + m_OutputHandlers[0].CreateTensorHandles(factory); + if (factory.SupportsSubTensors()) + { + const unsigned int numInputSlots = GetNumInputSlots(); + for (unsigned int i = 0; i < numInputSlots; ++i) + { + OutputHandler& outputHandler = GetInputSlot(i).GetConnectedOutputSlot()->GetOutputHandler(); + + outputHandler.SetData(factory.CreateSubTensorHandle(*m_OutputHandlers[0].GetData(), + outputHandler.GetTensorInfo().GetShape(), + m_Param.GetViewOrigin(i))); + } + } +} + +MergerLayer* MergerLayer::Clone(Graph& graph) const +{ + return CloneBase<MergerLayer>(graph, m_Param, GetName()); +} + +void MergerLayer::ValidateTensorShapesFromInputs() +{ + // Validate Merger layer + ConditionalThrow<LayerValidationException>(m_Param.GetNumViews() == GetNumInputSlots(), + "MergerLayer: Num Inputs must match num views."); + + unsigned int numDims = m_Param.GetNumDimensions(); + for (unsigned int i=0; i<GetNumInputSlots(); i++) + { + auto& inputInfo = GetInputSlot(i).GetConnection()->GetTensorInfo(); + + boost::ignore_unused(inputInfo); + ConditionalThrow<LayerValidationException>(numDims == inputInfo.GetNumDimensions(), + "MergerLayer: Num Dimensions must match all inputs."); + } + + // Find the bounding box (extents) of all the views + std::vector<unsigned int> extentMin(numDims); + std::vector<unsigned int> extentMax(numDims); + for (unsigned int i = 0; i < GetNumInputSlots(); i++) + { + const uint32_t* origin = m_Param.GetViewOrigin(i); + const armnn::TensorShape& shape = GetInputSlot(i).GetConnection()->GetTensorInfo().GetShape(); + for (unsigned int d = 0; d < numDims; d++) + { + extentMin[d] = std::min(extentMin[d], origin[d]); + extentMax[d] = std::max(extentMax[d], origin[d] + shape[d]); + } + } + + // Check that the bounding box starts at the origin + if (!std::all_of(extentMin.begin(), extentMin.end(), [](unsigned int s) { return s == 0; })) + { + throw LayerValidationException("MergerLayer: there is no view that starts at the origin"); + } + + // Check that there are no overlaps of views (this would lead to undefined output at those locations). 
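+ // Two views overlap only if their [origin, origin + size) intervals intersect on every dimension;
+ // they are disjoint as soon as a2 <= b1 or b2 <= a1 holds on any single axis. The pairwise check
+ // below is O(n^2) over the input views.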
+ // Check each pair of views against each other + // (and don't bother to check against self, or check the same pair both ways round) + for (unsigned int a = 0; a < GetNumInputSlots(); a++) + { + const uint32_t* aOrigin = m_Param.GetViewOrigin(a); + const armnn::TensorShape& aShape = GetInputSlot(a).GetConnection()->GetTensorInfo().GetShape(); + for (unsigned int b = 0; b < a; b++) + { + const uint32_t* bOrigin = m_Param.GetViewOrigin(b); + const armnn::TensorShape& bShape = GetInputSlot(b).GetConnection()->GetTensorInfo().GetShape(); + + bool allAxesOverlap = true; + for (unsigned int d = 0; d < numDims && allAxesOverlap; d++) + { + unsigned int a1 = aOrigin[d]; + unsigned int a2 = aOrigin[d] + aShape[d]; + + unsigned int b1 = bOrigin[d]; + unsigned int b2 = bOrigin[d] + bShape[d]; + + if (a2 <= b1 || b2 <= a1) + { + allAxesOverlap = false; + } + } + if (allAxesOverlap) + { + throw LayerValidationException("MergerLayer: Some views overlap."); + } + } + } + + // Check that there are no "holes", i.e. regions of the output which is not covered by a view. + // Because we already checked that there are no overlaps, this can be done simply by checking that + // the total 'volume' of the views is the same as the output. + unsigned int totalViewsVolume = 0; + for (unsigned int i = 0; i < GetNumInputSlots(); i++) + { + totalViewsVolume += GetInputSlot(i).GetConnection()->GetTensorInfo().GetNumElements(); + } + unsigned int outputVolume = 1; + for (unsigned int d = 0; d < numDims; d++) + { + outputVolume *= (extentMax[d] - extentMin[d]); + } + if (totalViewsVolume != outputVolume) + { + throw LayerValidationException("MergerLayer: there are some gaps between views"); + } + + TensorShape outShape(numDims, extentMax.data()); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "MergerLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +MultiplicationLayer::MultiplicationLayer(const char* name) + : Layer(2, 1, LayerType::Multiplication, name) +{ +} + +std::unique_ptr<IWorkload> MultiplicationLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + MultiplicationQueueDescriptor descriptor; + + return factory.CreateMultiplication(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +MultiplicationLayer* MultiplicationLayer::Clone(Graph& graph) const +{ + return CloneBase<MultiplicationLayer>(graph, GetName()); +} + +void MultiplicationLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() == + GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), + "MultiplicationLayer: Inputs must match"); + + TensorInfo infoOut(GetInputSlot(0).GetConnection()->GetTensorInfo()); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(infoOut.GetShape()), + "MultiplicationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +NormalizationLayer::NormalizationLayer(const NormalizationDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Normalization, param, name) +{ +} + +std::unique_ptr<IWorkload> NormalizationLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + NormalizationQueueDescriptor descriptor; + return factory.CreateNormalization(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +NormalizationLayer* NormalizationLayer::Clone(Graph& graph) const +{ + return CloneBase<NormalizationLayer>(graph, 
m_Param, GetName()); +} + +void NormalizationLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "NormalizationLayer: Input slot must be connected."); + + const TensorShape& outShape = GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +OutputLayer::OutputLayer(LayerBindingId id, const char* name) + : BindableLayer(1, 0, LayerType::Output, name, id) +{ +} + +std::unique_ptr<IWorkload> OutputLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + return nullptr; +} + +OutputLayer* OutputLayer::Clone(Graph& graph) const +{ + return CloneBase<OutputLayer>(graph, GetBindingId(), GetName()); +} + +void OutputLayer::ValidateTensorShapesFromInputs() +{ + // Just validate the input is connected + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "OutputLayer: Input slot must be connected."); +} + +PermuteLayer::PermuteLayer(const PermuteDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Permute, param, name) +{ +} + +std::unique_ptr<IWorkload> PermuteLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + PermuteQueueDescriptor descriptor; + return factory.CreatePermute(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +PermuteLayer* PermuteLayer::Clone(Graph& graph) const +{ + return CloneBase<PermuteLayer>(graph, m_Param, GetName()); +} + +void PermuteLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "PermuteLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "PermuteLayer: TensorInfo must be set on connected InputSlot."); + + const TensorInfo& infoIn = GetInputSlot(0).GetConnection()->GetTensorInfo(); + TensorShape shapeOut = armnnUtils::Permuted(infoIn.GetShape(), m_Param.m_DimMappings); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(shapeOut), + "PermuteLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +Pooling2dLayer::Pooling2dLayer(const Pooling2dDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Pooling2d, param, name) +{ +} + +std::unique_ptr<IWorkload> Pooling2dLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + Pooling2dQueueDescriptor descriptor; + return factory.CreatePooling2d(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const +{ + return CloneBase<Pooling2dLayer>(graph, m_Param, GetName()); +} + +void Pooling2dLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "Pooling2dLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "Pooling2dLayer: TensorInfo must be set on connected InputSlot."); + + IOutputSlot* input = GetInputSlot(0).GetConnection(); + const TensorShape& inputShape = input->GetTensorInfo().GetShape(); + + // If we support multiple batch dimensions in the future, then this assert will need to change. 
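+ // The pooled output size per spatial dimension is
+ //     outSize = round((inSize + padLow + padHigh - poolSize) / stride) + 1
+ // where 'round' is ceil or floor according to m_OutputShapeRounding; it is then reduced by one if the
+ // last pooling window would start at or beyond the end of the (left-padded) input, i.e. entirely in the
+ // trailing padding. This mirrors the Caffe and CL behaviour noted in the lambda below.
+ // For example, inWidth = 7, padLeft = padRight = 1, poolWidth = 3, strideX = 2 with Ceiling rounding
+ // gives readSize = 6, div = 3.0 and outWidth = 4.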
+ BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input."); + + + unsigned int inWidth = inputShape[3]; + unsigned int inHeight = inputShape[2]; + unsigned int inChannels = inputShape[1]; + unsigned int inBatchSize = inputShape[0]; + + bool isGlobalPooling = (m_Param.m_StrideX==0 && m_Param.m_StrideY==0); + unsigned int outWidth = 1; + unsigned int outHeight = 1; + if (!isGlobalPooling) + { + BOOST_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0, + "Stride can only be zero when performing global pooling"); + + auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto padMethod, + auto outputShapeRounding) + { + unsigned int readSize = inSize + lowPad + highPad - poolSize; + float div = static_cast<float>(readSize) / static_cast<float>(stride); + + unsigned int size = 0; + switch (outputShapeRounding) + { + case OutputShapeRounding::Ceiling: + size = static_cast<unsigned int>(ceil(div)) + 1; + break; + case OutputShapeRounding::Floor: + size = static_cast<unsigned int>(floor(div)) + 1; + break; + default: + BOOST_ASSERT_MSG(false, "Unsupported Output Shape Rounding"); + } + + // Make sure that border operations will start from inside the input and not the padded area + // This is what both Caffe and CL do... + if ((size - 1)*stride >= inSize + lowPad) + { + --size; + } + + return size; + }; + + outWidth = CalcSize(inWidth, m_Param.m_PadLeft, m_Param.m_PadRight, m_Param.m_PoolWidth, m_Param.m_StrideX, + m_Param.m_PaddingMethod, m_Param.m_OutputShapeRounding); + outHeight = CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight, m_Param.m_StrideY, + m_Param.m_PaddingMethod, m_Param.m_OutputShapeRounding); + + + } + unsigned int outChannels = inChannels; + unsigned int outBatchSize = inBatchSize; + + TensorShape shapeOut({outBatchSize, outChannels, outHeight, outWidth}); + + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(shapeOut), + "Pooling2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +SoftmaxLayer::SoftmaxLayer(const SoftmaxDescriptor &param, const char* name) + : LayerWithParameters(1, 1, LayerType::Softmax, param, name) +{ +} + +std::unique_ptr<IWorkload> SoftmaxLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + SoftmaxQueueDescriptor descriptor; + return factory.CreateSoftmax(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +SoftmaxLayer* SoftmaxLayer::Clone(Graph& graph) const +{ + return CloneBase<SoftmaxLayer>(graph, m_Param, GetName()); +} + +void SoftmaxLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "SoftmaxLayer: Input slot must be connected."); + const TensorShape& outShape = GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "SoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +SplitterLayer::SplitterLayer(const ViewsDescriptor& param, const char* name) + : LayerWithParameters(1, param.GetNumViews(), LayerType::Splitter, param, name) +{ +} + +std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + SplitterQueueDescriptor descriptor; + + // copy the window origins to the descriptor + for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i) + { +
descriptor.m_ViewOrigins.emplace_back( + std::vector<unsigned int>(m_Param.GetViewOrigin(i), m_Param.GetViewOrigin(i) + m_Param.GetNumDimensions())); + } + + return factory.CreateSplitter(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +void SplitterLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) +{ + //if sub tensors are supported than all the "splitter" need to do is to + //set the outputs to be appropriate sub tensors of the input. + if (factory.SupportsSubTensors()) + { + const OutputHandler& outputHandler = GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler(); + + ITensorHandle* inputData = outputHandler.GetData(); + //create the outputs as subtensors of the input + for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i) + { + m_OutputHandlers[i].SetData(factory.CreateSubTensorHandle(*inputData, + m_OutputHandlers[i].GetTensorInfo().GetShape(), + m_Param.GetViewOrigin(i))); + } + } + else + { + for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i) + { + m_OutputHandlers[i].CreateTensorHandles(factory); + } + } +} + +SplitterLayer* SplitterLayer::Clone(Graph& graph) const +{ + return CloneBase<SplitterLayer>(graph, m_Param, GetName()); +} + +void SplitterLayer::ValidateTensorShapesFromInputs() +{ + //Output shapes must match View shapes. + for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++) + { + const uint32_t* sizes = m_Param.GetViewSizes(viewIdx); + + TensorShape outShape(m_Param.GetNumDimensions(), sizes); + ConditionalThrow<LayerValidationException>(GetOutputSlot(viewIdx).ValidateTensorShape(outShape), + "SplitterLayer: View sizes must match output tensor shapes."); + } +} + +MemCopyLayer::MemCopyLayer(const char* name) + : Layer(1, 1, LayerType::MemCopy, name) +{ +} + +MemCopyLayer* MemCopyLayer::Clone(Graph& graph) const +{ + return CloneBase<MemCopyLayer>(graph, GetName()); +} + +std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const +{ + MemCopyQueueDescriptor descriptor; + return factory.CreateMemCopy(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +void MemCopyLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "MemCopyLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "MemCopyLayer: TensorInfo must be set on connected OutputSlot."); + + + IOutputSlot* input = GetInputSlot(0).GetConnection(); + + // input and output shapes are the same + TensorShape const& outShape = input->GetTensorInfo().GetShape(); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "MemCopyLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +ResizeBilinearLayer::ResizeBilinearLayer(const ResizeBilinearDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::ResizeBilinear, param, name) +{ +} + +std::unique_ptr<IWorkload> ResizeBilinearLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + ResizeBilinearQueueDescriptor descriptor; + return factory.CreateResizeBilinear(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +ResizeBilinearLayer* ResizeBilinearLayer::Clone(Graph& graph) const +{ + return CloneBase<ResizeBilinearLayer>(graph, m_Param, GetName()); +} + +void ResizeBilinearLayer::ValidateTensorShapesFromInputs() +{ + 
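+ // Resize bilinear only rescales the spatial dimensions: the output height and width come from the
+ // descriptor's m_TargetHeight / m_TargetWidth, while batch and channel counts are taken from the input.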
ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "ResizeBilinearLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "ResizeBilinearLayer: TensorInfo must be set on connected OutputSlot."); + + const TensorShape& inputShape = GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(); + unsigned int outWidth = m_Param.m_TargetWidth; + unsigned int outHeight = m_Param.m_TargetHeight; + unsigned int outChannels = inputShape[1]; + unsigned int outBatch = inputShape[0]; + TensorShape outShape({outBatch, outChannels, outHeight, outWidth}); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "ResizeBilinearLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +L2NormalizationLayer::L2NormalizationLayer(const char* name) + : Layer(1, 1, LayerType::L2Normalization, name) +{ +} + +std::unique_ptr<IWorkload> L2NormalizationLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + L2NormalizationQueueDescriptor descriptor; + return factory.CreateL2Normalization(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const +{ + return CloneBase<L2NormalizationLayer>(graph, GetName()); +} + +void L2NormalizationLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "L2NormalizationLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "L2NormalizationLayer: TensorInfo must be set on connected OutputSlot."); + + IOutputSlot* input = GetInputSlot(0).GetConnection(); + + // input and output shapes are the same + TensorShape const& outShape = input->GetTensorInfo().GetShape(); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "L2NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +ConstantLayer::ConstantLayer(const std::shared_ptr<ScopedCpuTensorHandle>& input, const char* name) + : Layer(0, 1, LayerType::Constant, name) + , m_LayerOutput(input) +{ +} + +std::unique_ptr<IWorkload> ConstantLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + ConstantQueueDescriptor descriptor; + descriptor.m_LayerOutput = m_LayerOutput.get(); + return factory.CreateConstant(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +ConstantLayer* ConstantLayer::Clone(Graph& graph) const +{ + // Cloned layers share the same layer output object + return CloneBase<ConstantLayer>(graph, m_LayerOutput, GetName()); +} + +void ConstantLayer::ValidateTensorShapesFromInputs() +{ + // get the output shape from the value of the constant layer + TensorShape const& outShape = m_LayerOutput->GetTensorInfo().GetShape(); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(outShape), + "ConstantLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +ReshapeLayer::ReshapeLayer(const ReshapeDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Reshape, param, name) +{ +} + +std::unique_ptr<IWorkload> ReshapeLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + ReshapeQueueDescriptor descriptor; + return factory.CreateReshape(descriptor,
PrepInfoAndDesc(descriptor, graph)); +} + +ReshapeLayer* ReshapeLayer::Clone(Graph& graph) const +{ + return CloneBase<ReshapeLayer>(graph, m_Param, GetName()); +} + +void ReshapeLayer::ValidateTensorShapesFromInputs() +{ + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr, + "ReshapeLayer: InputSlot must be connected to an OutputSlot"); + ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(), + "ReshapeLayer: TensorInfo must be set on connected OutputSlot."); + ConditionalThrow<LayerValidationException>(GetOutputSlot(0).ValidateTensorShape(m_Param.m_TargetShape), + "ReshapeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape."); +} + +} diff --git a/src/armnn/Layers.hpp b/src/armnn/Layers.hpp new file mode 100644 index 0000000000..5a1e3ca063 --- /dev/null +++ b/src/armnn/Layers.hpp @@ -0,0 +1,430 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "LayersFwd.hpp" + +#include "Layer.hpp" +#include "InternalTypes.hpp" + +#include <armnn/Descriptors.hpp> + +#include <boost/core/ignore_unused.hpp> + +namespace armnn +{ + +class ScopedCpuTensorHandle; + +template <typename Parameters> +class LayerWithParameters : public Layer +{ +public: + typedef Parameters DescriptorType; + + const Parameters& GetParameters() const { return m_Param; } + +protected: + LayerWithParameters(unsigned int numInputSlots, + unsigned int numOutputSlots, + LayerType type, + const Parameters& param, + const char* name) + : Layer(numInputSlots, numOutputSlots, type, name) + , m_Param(param) + { + } + + ~LayerWithParameters() = default; + + /// Helper function to reduce duplication in *Layer::CreateWorkload + template <typename QueueDescriptor> + WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor, const Graph& graph) const + { + descriptor.m_Parameters = m_Param; + return Layer::PrepInfoAndDesc(descriptor, graph); + } + + /// The parameters for the layer (not including tensor-valued weights etc.) 
+ Parameters m_Param; +}; + +class ActivationLayer : public LayerWithParameters<ActivationDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + ActivationLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + ActivationLayer(const ActivationDescriptor &param, const char* name); + ~ActivationLayer() = default; +}; + +class AdditionLayer : public Layer +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + AdditionLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + AdditionLayer(const char* name); + ~AdditionLayer() = default; +}; + +class BatchNormalizationLayer : public LayerWithParameters<BatchNormalizationDescriptor> +{ +public: + std::unique_ptr<ScopedCpuTensorHandle> m_Mean; + std::unique_ptr<ScopedCpuTensorHandle> m_Variance; + std::unique_ptr<ScopedCpuTensorHandle> m_Beta; + std::unique_ptr<ScopedCpuTensorHandle> m_Gamma; + + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + BatchNormalizationLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + BatchNormalizationLayer(const BatchNormalizationDescriptor& param, const char* name); + ~BatchNormalizationLayer() = default; +}; + +class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor> +{ +public: + std::unique_ptr<ScopedCpuTensorHandle> m_Weight; + std::unique_ptr<ScopedCpuTensorHandle> m_Bias; + + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + Convolution2dLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + Convolution2dLayer(const Convolution2dDescriptor& param, const char* name); + ~Convolution2dLayer() = default; +}; + +class DepthwiseConvolution2dLayer : public LayerWithParameters<DepthwiseConvolution2dDescriptor> +{ +public: + std::unique_ptr<ScopedCpuTensorHandle> m_Weight; + std::unique_ptr<ScopedCpuTensorHandle> m_Bias; + + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + DepthwiseConvolution2dLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + DepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor& param, const char* name); + ~DepthwiseConvolution2dLayer() = default; +}; + +class FakeQuantizationLayer : public LayerWithParameters<FakeQuantizationDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + FakeQuantizationLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + FakeQuantizationLayer(const FakeQuantizationDescriptor& descriptor, const char* name); + ~FakeQuantizationLayer() = default; +}; + +class FloorLayer : public Layer +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + FloorLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + FloorLayer(const char* name); + ~FloorLayer() = default; +}; + +class
FullyConnectedLayer : public LayerWithParameters<FullyConnectedDescriptor> +{ +public: + std::unique_ptr<ScopedCpuTensorHandle> m_Weight; + std::unique_ptr<ScopedCpuTensorHandle> m_Bias; + + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + FullyConnectedLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + FullyConnectedLayer(const FullyConnectedDescriptor& param, const char* name); + ~FullyConnectedLayer() = default; +}; + +class InputLayer : public BindableLayer +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + InputLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + InputLayer(LayerBindingId id, const char* name); + ~InputLayer() = default; +}; + +class MergerLayer : public LayerWithParameters<OriginsDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override; + + MergerLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + MergerLayer(const OriginsDescriptor& param, const char* name); + ~MergerLayer() = default; +}; + +class MultiplicationLayer : public Layer +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + MultiplicationLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + MultiplicationLayer(const char* name); + ~MultiplicationLayer() = default; +}; + +class NormalizationLayer : public LayerWithParameters<NormalizationDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + NormalizationLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + NormalizationLayer(const NormalizationDescriptor& param, const char* name); + ~NormalizationLayer() = default; +}; + +class OutputLayer : public BindableLayer +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override + { + boost::ignore_unused(graph, factory); + } + + OutputLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + OutputLayer(LayerBindingId id, const char* name); + ~OutputLayer() = default; +}; + +class PermuteLayer : public LayerWithParameters<PermuteDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + PermuteLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + + const PermutationVector& GetPermutation() const + { + return m_Param.m_DimMappings; + } + + bool IsInverse(const Layer& other) const + { + return (other.GetType() == LayerType::Permute) && + GetPermutation().IsInverse(boost::polymorphic_downcast<const PermuteLayer*>(&other)->GetPermutation()); + } + + bool IsEqual(const Layer& other) const + { + return (other.GetType() == LayerType::Permute) && + 
GetPermutation().IsEqual(boost::polymorphic_downcast<const PermuteLayer*>(&other)->GetPermutation()); + } + +protected: + PermuteLayer(const PermuteDescriptor& param, const char* name); + ~PermuteLayer() = default; +}; + +class Pooling2dLayer : public LayerWithParameters<Pooling2dDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + Pooling2dLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + Pooling2dLayer(const Pooling2dDescriptor& param, const char* name); + ~Pooling2dLayer() = default; +}; + +class SoftmaxLayer : public LayerWithParameters<SoftmaxDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + SoftmaxLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + SoftmaxLayer(const SoftmaxDescriptor& param, const char* name); + ~SoftmaxLayer() = default; +}; + +class SplitterLayer : public LayerWithParameters<ViewsDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override; + + SplitterLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + SplitterLayer(const ViewsDescriptor& param, const char* name); + ~SplitterLayer() = default; +}; + +class MemCopyLayer : public Layer +{ +public: + virtual std::unique_ptr<IWorkload> + CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const override; + + MemCopyLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + MemCopyLayer(const char* name); + ~MemCopyLayer() = default; +}; + +class ResizeBilinearLayer : public LayerWithParameters<ResizeBilinearDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> + CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const override; + + ResizeBilinearLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + ResizeBilinearLayer(const ResizeBilinearDescriptor& param, const char* name); + ~ResizeBilinearLayer() = default; +}; + +class L2NormalizationLayer : public Layer +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + L2NormalizationLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + L2NormalizationLayer(const char* name); + ~L2NormalizationLayer() = default; +}; + +class ConstantLayer : public Layer +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + ConstantLayer* Clone(Graph& graph) const override; + + void ValidateTensorShapesFromInputs() override; + +protected: + ConstantLayer(const std::shared_ptr<ScopedCpuTensorHandle>& input, const char* name); + ~ConstantLayer() = default; + +private: + std::shared_ptr<ScopedCpuTensorHandle> m_LayerOutput; +}; + +class ReshapeLayer : public LayerWithParameters<ReshapeDescriptor> +{ +public: + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + ReshapeLayer* Clone(Graph& graph) const override; 
+ + void ValidateTensorShapesFromInputs() override; + + bool IsEqual(const Layer& other) const + { + return (other.GetType() == LayerType::Reshape) && + m_Param.m_TargetShape == boost::polymorphic_downcast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape; + } + +protected: + ReshapeLayer(const ReshapeDescriptor& desc, const char* name); + ~ReshapeLayer() = default; +}; + +} diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp new file mode 100644 index 0000000000..a77c723751 --- /dev/null +++ b/src/armnn/LayersFwd.hpp @@ -0,0 +1,59 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "InternalTypes.hpp" + +namespace armnn +{ + +template <LayerType Type> +struct LayerTypeOfImpl; + +template <LayerType Type> +using LayerTypeOf = typename LayerTypeOfImpl<Type>::Type; + +template <typename T> +constexpr LayerType LayerEnumOf(const T* = nullptr); + +#define DECLARE_LAYER_IMPL(_, LayerName) \ + class LayerName##Layer; \ + template <> \ + struct LayerTypeOfImpl<LayerType::_##LayerName> \ + { \ + using Type = LayerName##Layer; \ + }; \ + template <> \ + constexpr LayerType LayerEnumOf(const LayerName##Layer*) \ + { \ + return LayerType::_##LayerName; \ + } + +#define DECLARE_LAYER(LayerName) DECLARE_LAYER_IMPL(, LayerName) + +DECLARE_LAYER(Activation) +DECLARE_LAYER(Addition) +DECLARE_LAYER(BatchNormalization) +DECLARE_LAYER(Constant) +DECLARE_LAYER(Convolution2d) +DECLARE_LAYER(DepthwiseConvolution2d) +DECLARE_LAYER(FakeQuantization) +DECLARE_LAYER(Floor) +DECLARE_LAYER(FullyConnected) +DECLARE_LAYER(Input) +DECLARE_LAYER(L2Normalization) +DECLARE_LAYER(MemCopy) +DECLARE_LAYER(Merger) +DECLARE_LAYER(Multiplication) +DECLARE_LAYER(Normalization) +DECLARE_LAYER(Output) +DECLARE_LAYER(Permute) +DECLARE_LAYER(Pooling2d) +DECLARE_LAYER(Reshape) +DECLARE_LAYER(ResizeBilinear) +DECLARE_LAYER(Softmax) +DECLARE_LAYER(Splitter) + +} diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp new file mode 100644 index 0000000000..14712d209c --- /dev/null +++ b/src/armnn/LoadedNetwork.cpp @@ -0,0 +1,424 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "LoadedNetwork.hpp" +#include "Layer.hpp" +#include "Layers.hpp" +#include "Graph.hpp" +#include "Network.hpp" +#include "Runtime.hpp" +#include "Profiling.hpp" + +#ifdef ARMCOMPUTECL_ENABLED +#include <arm_compute/core/CL/OpenCL.h> +#endif + +#include <backends/CpuTensorHandle.hpp> + +#include <boost/polymorphic_cast.hpp> +#include <boost/assert.hpp> +#include <boost/format.hpp> +#include <boost/log/trivial.hpp> + +namespace armnn +{ + +using namespace std; + +std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net, + const WorkloadFactories& workloadFactories) +{ + std::unique_ptr<LoadedNetwork> loadedNetwork; + + try + { + loadedNetwork.reset(new LoadedNetwork(std::move(net), workloadFactories)); + } + catch (const std::runtime_error& error) + { + BOOST_LOG_TRIVIAL(error) << "An error occurred when preparing the network workloads: " << error.what(); + return std::unique_ptr<LoadedNetwork>(); + } + catch (const armnn::Exception& error) + { + BOOST_LOG_TRIVIAL(error) << "An error occurred when preparing the network workloads: " << error.what(); + return std::unique_ptr<LoadedNetwork>(); + } +#if ARMCOMPUTECL_ENABLED + catch (const cl::Error& error) + { + BOOST_LOG_TRIVIAL(error) << "A CL error occurred attempting to prepare a network workload: " + << error.what() << ". CL error code is: " << error.err(); + return std::unique_ptr<LoadedNetwork>(); + } +#endif + + return loadedNetwork; +} + +LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net, const WorkloadFactories& workloadFactories) +: m_OptimizedNetwork(std::move(net)) +{ + Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort(); + //first create tensor handlers + //handlers are created before workloads are + //because workload creation can modify some of the handlers + //(for example the splitter and merger layers) + for (auto&& layer : order) + { + layer->CreateTensorHandles(m_OptimizedNetwork->GetGraph(), *GetWorkloadFactory(*layer, workloadFactories)); + } + + //then create workloads + for (auto&& layer : order) + { + const shared_ptr<IWorkloadFactory> workloadFactory = GetWorkloadFactory(*layer, workloadFactories); + + switch (layer->GetType()) + { + case LayerType::Input: + case LayerType::Output: + { + // Inputs and outputs are treated in a special way - see EnqueueInput() and EnqueueOutput() + break; + } + default: + { + auto workload = layer->CreateWorkload(m_OptimizedNetwork->GetGraph(), *workloadFactory); + + if (!workload) + { + const char* const layerName = layer->GetNameStr().length() != 0 ? 
layer->GetName() : "<Unnamed>"; + throw InvalidArgumentException(boost::str( + boost::format("No workload created for layer (name: '%1%' type: '%2%') (compute '%3%')") + % layerName % static_cast<int>(layer->GetType()) % layer->GetComputeDevice() + )); + } + + m_WorkloadQueue.push_back(move(workload)); + break; + } + } + } + + // set up memory + m_OptimizedNetwork->GetGraph().AllocateDynamicBuffers(); +} + +TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const +{ + for (auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers()) + { + BOOST_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot"); + if (inputLayer->GetBindingId() == layerId) + { + return inputLayer->GetOutputSlot(0).GetTensorInfo(); + } + } + + throw InvalidArgumentException(boost::str(boost::format("No input layer is associated with id %1%") % layerId)); +} + +TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const +{ + for (auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers()) + { + BOOST_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot"); + BOOST_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected"); + if (outputLayer->GetBindingId() == layerId) + { + return outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo(); + } + } + + throw InvalidArgumentException(boost::str(boost::format("No output layer is associated with id %1%") % layerId)); +} + +const shared_ptr<IWorkloadFactory> LoadedNetwork::GetWorkloadFactory(const Layer& layer, + const WorkloadFactories& workloadFactories) const +{ + shared_ptr<IWorkloadFactory> workloadFactory; + + switch (layer.GetComputeDevice()) + { + case Compute::CpuAcc: + { + workloadFactory = workloadFactories.m_CpuAcc; + break; + } + case Compute::GpuAcc: + { + workloadFactory = workloadFactories.m_GpuAcc; + break; + } + case Compute::CpuRef: + default: + { + workloadFactory = workloadFactories.m_CpuRef; + break; + } + } + + BOOST_ASSERT_MSG(workloadFactory, "No workload factory"); + + std::string reasonIfUnsupported; + BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported), + "Factory does not support layer"); + boost::ignore_unused(reasonIfUnsupported); + + return workloadFactory; +} + +namespace { + +// Non-copyable class owning accelerator-specific tensor data. +class TensorPin +{ +public: + TensorPin(std::unique_ptr<ITensorHandle> handle, const TensorInfo& info, LayerBindingId id) + : m_TensorHandle(std::move(handle)) + , m_TensorInfo(info) + , m_Id(id) + { + } + + ITensorHandle* GetTensorHandle() const { return m_TensorHandle.get(); } + const TensorInfo& GetTensorInfo() const { return m_TensorInfo; } + LayerBindingId GetBindingId() const { return m_Id; } + +private: + std::unique_ptr<ITensorHandle> m_TensorHandle; + TensorInfo m_TensorInfo; + LayerBindingId m_Id; +}; + +static const TensorPin& GetTensorPin(LayerBindingId id, + const std::vector<TensorPin>& pins, + char const* bindingPointDesc) +{ + auto it = std::find_if(pins.begin(), pins.end(), + [id](const TensorPin& pin) + { + return pin.GetBindingId() == id; + }); + + if (it != pins.end()) + { + return *it; + } + else + { + throw InvalidArgumentException(boost::str( + boost::format("No tensor supplied for %1% %2%") % bindingPointDesc % id)); + } +} + +// Stores data that needs to be kept accessible for the entire execution of a workload. 
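+// Each caller-supplied input tensor is wrapped in a ConstPassthroughCpuTensorHandle and each output
+// tensor in a PassthroughCpuTensorHandle, so no data is copied; the resulting pins are then looked up
+// by LayerBindingId when EnqueueInput()/EnqueueOutput() build the boundary workloads.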
+class WorkloadData +{ +public: + WorkloadData(const InputTensors& inputTensors, const OutputTensors& outputTensors) + { + m_InputTensorPins.reserve(inputTensors.size()); + m_OutputTensorPins.reserve(outputTensors.size()); + + for (auto inputTensorPair : inputTensors) + { + auto inputTensor = inputTensorPair.second; + + std::unique_ptr<ITensorHandle> tensorHandle = + std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(),inputTensor.GetMemoryArea()); + LayerBindingId layerId = inputTensorPair.first; + + m_InputTensorPins.emplace_back(std::move(tensorHandle), inputTensor.GetInfo(), layerId); + } + + for (auto outputTensorPair : outputTensors) + { + auto outputTensor = outputTensorPair.second; + + std::unique_ptr<ITensorHandle> tensorHandle = + std::make_unique<PassthroughCpuTensorHandle>(outputTensor.GetInfo(), outputTensor.GetMemoryArea()); + LayerBindingId layerId = outputTensorPair.first; + + m_OutputTensorPins.emplace_back(std::move(tensorHandle), outputTensor.GetInfo(), layerId); + } + } + + const TensorPin& GetInputTensorPin(LayerBindingId id) const + { + return GetTensorPin(id, m_InputTensorPins, "input"); + } + + const TensorPin& GetOutputTensorPin(LayerBindingId id) const + { + return GetTensorPin(id, m_OutputTensorPins, "output"); + } + +private: + + std::vector<TensorPin> m_InputTensorPins; + std::vector<TensorPin> m_OutputTensorPins; +}; + +} + +Status LoadedNetwork::EnqueueWorkload(const InputTensors& inputTensors, + const OutputTensors& outputTensors, + const WorkloadFactories& workloadFactories) +{ + ARMNN_UPDATE_PROFILING_EVENT_TAG(); + ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "EnqueueWorkload"); + + const Graph& graph = m_OptimizedNetwork->GetGraph(); + + // Walk graph to determine the order of execution + if (graph.GetNumLayers() < 2) + { + BOOST_LOG_TRIVIAL(warning) << "IRuntime::EnqueueWorkload()::Less than two nodes in graph"; + return Status::Failure; + } + + // Data that must be kept alive for the entire execution of the workload + WorkloadData workloadData(inputTensors, outputTensors); + + if (graph.GetNumInputs() != inputTensors.size()) + { + throw InvalidArgumentException("Number of inputs provided does not match network."); + } + + // for each input to the network, call EnqueueInput with the data passed by the user + for (const BindableLayer* inputLayer : graph.GetInputLayers()) + { + const TensorPin& pin = workloadData.GetInputTensorPin(inputLayer->GetBindingId()); + EnqueueInput(*inputLayer, pin.GetTensorHandle(), pin.GetTensorInfo(), workloadFactories); + } + + // for each output to the network, call EnqueueOutput with the data passed by the user + for (const BindableLayer* outputLayer : graph.GetOutputLayers()) + { + const TensorPin& pin = workloadData.GetOutputTensorPin(outputLayer->GetBindingId()); + EnqueueOutput(*outputLayer, pin.GetTensorHandle(), pin.GetTensorInfo(), workloadFactories); + } + + bool executionSucceeded = true; + + { + ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute"); + executionSucceeded = Execute(); + } + + // Hack: get rid of inputs and outputs we added + TidyWorkloadQueue(graph.GetNumInputs(), graph.GetNumOutputs()); + + return executionSucceeded ? 
Status::Success : Status::Failure; +} + +void LoadedNetwork::EnqueueInput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo, + const WorkloadFactories& workloadFactories) +{ + if (layer.GetType() != LayerType::Input) + { + throw InvalidArgumentException("EnqueueInput: given layer not an InputLayer"); + } + + if (tensorHandle == nullptr) + { + throw InvalidArgumentException("EnqueueInput: tensorHandle must not be NULL"); + } + + InputQueueDescriptor inputQueueDescriptor; + WorkloadInfo info; + + inputQueueDescriptor.m_Inputs.push_back(tensorHandle); + info.m_InputTensorInfos.push_back(tensorInfo); + + BOOST_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output"); + const OutputHandler& handler = layer.GetOutputHandler(); + const TensorInfo& outputTensorInfo = handler.GetTensorInfo(); + ITensorHandle* outputTensorHandle = handler.GetData(); + BOOST_ASSERT_MSG(outputTensorHandle != nullptr, + "Data should have been allocated."); + inputQueueDescriptor.m_Outputs.push_back(outputTensorHandle); + info.m_OutputTensorInfos.push_back(outputTensorInfo); + + shared_ptr<IWorkloadFactory> workloadFactory = GetWorkloadFactory(layer, workloadFactories); + auto inputWorkload = workloadFactory->CreateInput(inputQueueDescriptor, info); + BOOST_ASSERT_MSG(inputWorkload, "No input workload created"); + m_WorkloadQueue.insert(m_WorkloadQueue.begin(), move(inputWorkload)); +} + +void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* tensorHandle, + const TensorInfo& tensorInfo, const WorkloadFactories& workloadFactories) +{ + if (layer.GetType() != LayerType::Output) + { + throw InvalidArgumentException("EnqueueOutput: given layer not an OutputLayer"); + } + + if (tensorHandle == nullptr) + { + throw InvalidArgumentException("EnqueueOutput: tensorHandle must not be NULL"); + } + + OutputQueueDescriptor outputQueueDescriptor; + WorkloadInfo info; + + outputQueueDescriptor.m_Outputs.push_back(tensorHandle); + info.m_OutputTensorInfos.push_back(tensorInfo); + + BOOST_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input."); + + // Get the output handler from the previous node + const OutputHandler& outputHandler = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler(); + + const TensorInfo& inputTensorInfo = outputHandler.GetTensorInfo(); + ITensorHandle* inputTensorHandle = outputHandler.GetData(); + BOOST_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated."); + + outputQueueDescriptor.m_Inputs.push_back(inputTensorHandle); + info.m_InputTensorInfos.push_back(inputTensorInfo); + + shared_ptr<IWorkloadFactory> workloadFactory = GetWorkloadFactory(layer, workloadFactories); + auto outputWorkload = workloadFactory->CreateOutput(outputQueueDescriptor, info); + BOOST_ASSERT_MSG(outputWorkload, "No output workload created"); + m_WorkloadQueue.push_back(move(outputWorkload)); +} + +bool LoadedNetwork::Execute() +{ + bool success = true; + + try + { + for (size_t i = 0; i < m_WorkloadQueue.size(); ++i) + { + m_WorkloadQueue[i]->Execute(); + } + } +#if ARMCOMPUTECL_ENABLED + catch (const cl::Error& error) + { + BOOST_LOG_TRIVIAL(error) << "A CL error occurred attempting to execute a workload: " + << error.what() << ". 
CL error code is: " << error.err(); + success = false; + } +#endif + catch (const std::runtime_error& error) + { + BOOST_LOG_TRIVIAL(error) << "An error occurred attempting to execute a workload: " << error.what(); + success = false; + } + + return success; +} + +void LoadedNetwork::TidyWorkloadQueue(size_t numInputs, size_t numOutputs) +{ + m_WorkloadQueue.erase(m_WorkloadQueue.begin(), m_WorkloadQueue.begin() + boost::numeric_cast<long>(numInputs)); + m_WorkloadQueue.erase(m_WorkloadQueue.end() - boost::numeric_cast<long>(numOutputs), m_WorkloadQueue.end()); +} + +} diff --git a/src/armnn/LoadedNetwork.hpp b/src/armnn/LoadedNetwork.hpp new file mode 100644 index 0000000000..d6af11e779 --- /dev/null +++ b/src/armnn/LoadedNetwork.hpp @@ -0,0 +1,59 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "armnn/Tensor.hpp" +#include "armnn/Types.hpp" +#include "Network.hpp" +#include "LayerFwd.hpp" +#include "backends/Workload.hpp" +#include "backends/WorkloadFactory.hpp" + +namespace cl +{ + class Context; + class CommandQueue; + class Device; +} + +namespace armnn +{ + +struct WorkloadFactories; + +class LoadedNetwork +{ +public: + TensorInfo GetInputTensorInfo(LayerBindingId layerId) const; + TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const; + + Status EnqueueWorkload(const InputTensors& inputTensors, const OutputTensors& outputTensors, + const WorkloadFactories& workloadFactories); + + static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net, + const WorkloadFactories& workloadFactories); + +private: + LoadedNetwork(std::unique_ptr<OptimizedNetwork> net, const WorkloadFactories& workloadFactories); + + void EnqueueInput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo, + const WorkloadFactories& workloadFactories); + + void EnqueueOutput(const BindableLayer& layer, ITensorHandle* tensorHandle, + const TensorInfo& tensorInfo, const WorkloadFactories& workloadFactories); + + bool Execute(); + + void TidyWorkloadQueue(size_t numInputs, size_t numOutputs); + + const std::shared_ptr<IWorkloadFactory> GetWorkloadFactory(const Layer& layer, + const WorkloadFactories& workloadFactories) const; + + std::unique_ptr<OptimizedNetwork> m_OptimizedNetwork; + + std::vector< std::unique_ptr<IWorkload> > m_WorkloadQueue; +}; + +} diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp new file mode 100644 index 0000000000..4ee68b3c48 --- /dev/null +++ b/src/armnn/Network.cpp @@ -0,0 +1,335 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "Network.hpp" +#include "Graph.hpp" +#include "Layer.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" +#include "Layers.hpp" +#include "Optimizer.hpp" + +#include <armnn/Utils.hpp> + +#include <fcntl.h> +#include <algorithm> +#include <fstream> +#include <memory> + +#include <boost/assert.hpp> +#include <boost/format.hpp> +#include <boost/log/trivial.hpp> +#include <boost/numeric/conversion/converter_policies.hpp> +#include <boost/cast.hpp> + +namespace armnn +{ + +armnn::INetwork* INetwork::CreateRaw() +{ + return new Network(); +} + +armnn::INetworkPtr INetwork::Create() +{ + return INetworkPtr(CreateRaw(), &INetwork::Destroy); +} + +void INetwork::Destroy(INetwork* network) +{ + delete boost::polymorphic_downcast<Network*>(network); +} + +Status Network::PrintGraph() +{ + m_Graph->Print(); + return Status::Success; +} + +void IOptimizedNetwork::Destroy(IOptimizedNetwork* network) +{ + delete boost::polymorphic_downcast<OptimizedNetwork*>(network); +} + +Status OptimizedNetwork::PrintGraph() +{ + m_Graph->Print(); + return Status::Success; +} + +IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, const DeviceSpec& deviceSpec) +{ + const Network& network = *boost::polymorphic_downcast<const Network*>(&inNetwork); + std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph()); + + OptimizedNetwork* optNet = new OptimizedNetwork(std::move(graph)); + + Optimizer::Get().Optimize(optNet->GetGraph()); + + // Infer the tensor infos for all output slots. Throws an exception on failure. + optNet->GetGraph().InferTensorInfos(); + + // Assign a compute device for all nodes + for (auto&& layer : optNet->GetGraph()) + { + DataType dataType = layer->GetDataType(); + + // Default to the user-requested compute device from the Runtime + layer->SetComputeDevice(deviceSpec.DefaultComputeDevice); + + // If the layer is unsupported by this device, fall back to reference + std::string reasonIfUnsupported; + if (!IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported)) + { + BOOST_LOG_TRIVIAL(warning) << "Layer of type " << GetLayerTypeAsCString(layer->GetType()) << + " is not supported on requested backend " << layer->GetComputeDevice() << " (reason: " << + reasonIfUnsupported << "), falling back to CpuRef backend."; + layer->SetComputeDevice(Compute::CpuRef); + } + + BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(*layer, dataType, reasonIfUnsupported), + "Layer has no valid compute device"); + } + + optNet->GetGraph().AddCopyLayers(); + + return {optNet, &IOptimizedNetwork::Destroy}; +} + +Network::Network() +: m_Graph(std::make_unique<Graph>()) +{ +} + +Network::~Network() +{ +} + +IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name) +{ + return m_Graph->AddLayer<InputLayer>(id, name); +} + +IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor, + const ConstTensor& weights, + const ConstTensor* biases, + const char* name) +{ + if (fullyConnectedDescriptor.m_BiasEnabled && (biases == nullptr)) + { + throw InvalidArgumentException("AddFullyConnectedLayer: biases cannot be NULL"); + } + + const auto layer = m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name); + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights); + + if (fullyConnectedDescriptor.m_BiasEnabled) + { + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases); + } + + return layer; +} + +IConnectableLayer* 
Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor, + const ConstTensor& weights, + const char* name) +{ + return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, nullptr, name); +} + +IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor, + const ConstTensor& weights, + const ConstTensor& biases, + const char* name) +{ + return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, &biases, name); +} + +IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor* biases, + const char* name) +{ + if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr)) + { + throw InvalidArgumentException("AddConvolution2dLayer: biases cannot be NULL"); + } + + const auto layer = m_Graph->AddLayer<Convolution2dLayer>(convolution2dDescriptor, name); + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights); + + if (convolution2dDescriptor.m_BiasEnabled) + { + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases); + } + + return layer; +} + +IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const char* name) +{ + return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name); +} +IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor& biases, + const char* name) +{ + return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name); +} + +IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl( + const DepthwiseConvolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor* biases, + const char* name) +{ + if (convolution2dDescriptor.m_BiasEnabled && (biases == nullptr)) + { + throw InvalidArgumentException("AddDepthwiseConvolution2dLayer: biases cannot be NULL"); + } + + const auto layer = m_Graph->AddLayer<DepthwiseConvolution2dLayer>(convolution2dDescriptor, name); + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(weights); + + if (convolution2dDescriptor.m_BiasEnabled) + { + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(*biases); + } + + return layer; +} + +IConnectableLayer* Network::AddDepthwiseConvolution2dLayer( + const DepthwiseConvolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const char* name) +{ + return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, nullptr, name); +} +IConnectableLayer* Network::AddDepthwiseConvolution2dLayer( + const DepthwiseConvolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor& biases, + const char* name) +{ + return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name); +} + +IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor, + const char* name) +{ + return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name); +} + +IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor, + const char* name) +{ + return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name); +} + +IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor, + const char* name) +{ + return 
m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name); +} + +IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor, + const char* name) +{ + return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name); +} + +IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor, + const char* name) +{ + return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name); +} + +IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor, + const char* name) +{ + return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name); +} + +IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor, + const char* name) +{ + return m_Graph->AddLayer<MergerLayer>(mergerDescriptor, name); +} + +IConnectableLayer* Network::AddAdditionLayer(const char* name) +{ + return m_Graph->AddLayer<AdditionLayer>(name); +} + +IConnectableLayer* Network::AddMultiplicationLayer(const char* name) +{ + return m_Graph->AddLayer<MultiplicationLayer>(name); +} + +IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name) +{ + return m_Graph->AddLayer<OutputLayer>(id, name); +} + +IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc, + const ConstTensor& mean, + const ConstTensor& variance, + const ConstTensor& beta, + const ConstTensor& gamma, + const char* name) +{ + const auto layer = m_Graph->AddLayer<BatchNormalizationLayer>(desc, name); + + layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(mean); + layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(variance); + layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(beta); + layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(gamma); + + return layer; +} + +IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDescriptor, const char* name) +{ + return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name); +} + +IConnectableLayer* Network::AddL2NormalizationLayer(const char* name) +{ + return m_Graph->AddLayer<L2NormalizationLayer>(name); +} + +IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name) +{ + return m_Graph->AddLayer<ConstantLayer>(std::make_shared<ScopedCpuTensorHandle>(input), name); +} + +IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor, const char* name) +{ + return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name); +} + +IConnectableLayer* Network::AddFloorLayer(const char* name) +{ + return m_Graph->AddLayer<FloorLayer>(name); +} + +OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph) + : m_Graph(std::move(graph)) +{ +} + +OptimizedNetwork::~OptimizedNetwork() +{ +} + +} // namespace armnn + diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp new file mode 100644 index 0000000000..de0c1ecf2f --- /dev/null +++ b/src/armnn/Network.hpp @@ -0,0 +1,145 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include <armnn/DescriptorsFwd.hpp> +#include <armnn/TensorFwd.hpp> +#include <armnn/Types.hpp> + +#include <armnn/INetwork.hpp> + +#include <string> +#include <vector> +#include <memory> + +#include "Layer.hpp" + +namespace armnn +{ +class Graph; + +/// Private implementation of INetwork +class Network final : public INetwork +{ +public: + Network(); + ~Network(); + + const Graph& GetGraph() const { return *m_Graph; } + + Status PrintGraph() override; + + IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr) override; + + IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const char* name = nullptr) override; + + IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor& biases, + const char* name = nullptr) override; + + IConnectableLayer* AddDepthwiseConvolution2dLayer( + const DepthwiseConvolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const char* name = nullptr) override; + + IConnectableLayer* AddDepthwiseConvolution2dLayer( + const DepthwiseConvolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor& biases, + const char* name = nullptr) override; + + IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor, + const ConstTensor& weights, + const char* name = nullptr) override; + + IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor, + const ConstTensor& weights, + const ConstTensor& biases, + const char* name = nullptr) override; + + IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor, + const char* name = nullptr) override; + + IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor, + const char* name = nullptr) override; + + IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor, + const char* name = nullptr) override; + + IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor, + const char* name = nullptr) override; + + IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor, + const char* name = nullptr) override; + + IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor, + const char* name = nullptr) override; + + IConnectableLayer* AddMergerLayer(const OriginsDescriptor& mergerDescriptor, + const char* name = nullptr) override; + + IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override; + + IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr) override; + + IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc, + const ConstTensor& mean, + const ConstTensor& variance, + const ConstTensor& beta, + const ConstTensor& gamma, + const char* name = nullptr) override; + + IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc, + const char* name = nullptr) override; + + IConnectableLayer* AddL2NormalizationLayer(const char* name = nullptr) override; + + IConnectableLayer* AddConstantLayer(const ConstTensor& input, const char* name = nullptr) override; + + IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor, + const char* name = nullptr) override; + + IConnectableLayer* AddFloorLayer(const char* name = nullptr) override; + + 
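+    /// Adds an output layer to the network. The given LayerBindingId identifies this layer when the
+    /// corresponding output tensor is supplied to IRuntime::EnqueueWorkload().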
IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr) override; + +private: + IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor, + const ConstTensor& weights, + const ConstTensor* biases, + const char* name); + + IConnectableLayer* AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor* biases, + const char* name); + + IConnectableLayer* AddDepthwiseConvolution2dLayerImpl( + const DepthwiseConvolution2dDescriptor& convolution2dDescriptor, + const ConstTensor& weights, + const ConstTensor* biases, + const char* name); + + std::unique_ptr<Graph> m_Graph; +}; + +class OptimizedNetwork final : public IOptimizedNetwork +{ +public: + OptimizedNetwork(std::unique_ptr<Graph> graph); + ~OptimizedNetwork(); + + Status PrintGraph() override; + + Graph& GetGraph() { return *m_Graph; } + +private: + std::unique_ptr<Graph> m_Graph; +}; + +} // namespace armnn diff --git a/src/armnn/Optimizer.cpp b/src/armnn/Optimizer.cpp new file mode 100644 index 0000000000..85b9f2803c --- /dev/null +++ b/src/armnn/Optimizer.cpp @@ -0,0 +1,55 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "Optimizer.hpp" +#include "optimizations/All.hpp" + +namespace armnn +{ + +const Optimizer& Optimizer::Get() +{ + // Add optimizations here + static optimizations::SquashEqualPermuteSiblings squashEqualPermuteSiblings; + static optimizations::SquashEqualReshapeSiblings squashEqualReshapeSiblings; + static optimizations::OptimizeInversePermutes optimizeInversePermutes; + static optimizations::MovePermuteUp movePermuteUp; + static optimizations::PermuteAsReshape permuteAsReshape; + static optimizations::OptimizeConsecutiveReshapes optimizeConsecutiveReshapes; + + // Set optimizations in desired order + static const Optimizer optimizer({ + &squashEqualPermuteSiblings, + &squashEqualReshapeSiblings, + &optimizeInversePermutes, + &movePermuteUp, + &permuteAsReshape, + &optimizeConsecutiveReshapes, + }); + + return optimizer; +} + +void Optimizer::Optimize(Graph& graph) const +{ + auto it = graph.TopologicalSort().end(); + // Call TopologicalSort() in every iteration to re-order the list in case layers where added/removed. + while (it != graph.TopologicalSort().begin()) + { + --it; + for (auto&& optimization : m_Optimizations) + { + optimization->Run(graph, it); + + if ((*it)->IsOutputUnconnected()) + { + it = graph.EraseLayer(it); + break; + } + } + } +} + + +} // namespace armnn diff --git a/src/armnn/Optimizer.hpp b/src/armnn/Optimizer.hpp new file mode 100644 index 0000000000..262f264c28 --- /dev/null +++ b/src/armnn/Optimizer.hpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <vector> + +namespace armnn +{ + +class Graph; +class Optimization; + +class Optimizer +{ +public: + static const Optimizer& Get(); + + void Optimize(Graph& graph) const; + +private: + ~Optimizer() = default; + + Optimizer(std::initializer_list<Optimization*> optimizations) : m_Optimizations(optimizations) {} + + std::vector<Optimization*> m_Optimizations; +}; + +} // namespace armnn diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp new file mode 100644 index 0000000000..15a195e6bd --- /dev/null +++ b/src/armnn/Profiling.cpp @@ -0,0 +1,293 @@ +// +// Copyright © 2017 Arm Ltd. 
All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "Profiling.hpp" + +#if ARMNN_PROFILING_ENABLED + +#if ARMNN_STREAMLINE_ENABLED +#include <streamline_annotate.h> +#endif + +#if ARMCOMPUTECL_ENABLED +#include <arm_compute/runtime/CL/CLFunctions.h> +#endif + +#include <algorithm> +#include <iomanip> +#include <iostream> +#include <map> +#include <stack> +#include <boost/algorithm/string.hpp> + +namespace armnn +{ + +// Controls the amount of memory initially allocated to store profiling events. +// If chosen carefully, the profiling system will not make any additional allocations, thus minimizing its impact on +// measured times. +constexpr std::size_t g_ProfilingEventCountHint = 1024; + +// Whether profiling reports should include the sequence of events together with their timings. +constexpr bool g_WriteProfilingEventSequence = true; + +// Whether profiling reports should also report detailed information on events grouped by tag. +// This is used to group stats per inference (see usage of ARMNN_UPDATE_PROFILING_EVENT_TAG in +// Runtime::EnqueueWorkload). This can spam the output stream, so use carefully (or adapt +// the code to just output information for a tag of interest). +constexpr bool g_AggregateProfilingEventsByTag = false; + +// Whether a call to Profiler::AnalyzeEventsAndWriteResults() will be made when the Profiler +// singleton is destroyed. It can be convenient for local tests. +constexpr bool g_WriteReportToStdOutOnProfilerDestruction = true; + +// Whether events denoting operations running on the GPU should force a sync before/after the event. +// This is hardcoded to true for now as the profiling timings are not very useful without it. +constexpr bool g_ProfilingForceGpuSync = true; + +std::map<std::string, Profiler::ProfilingEventStats> Profiler::CalculateProfilingEventStats() const +{ + std::map<std::string, ProfilingEventStats> nameToStatsMap; + + for (auto&& event : m_EventSequence) + { + auto mapIter = nameToStatsMap.find(event.m_Label); + if (mapIter != nameToStatsMap.end()) + { + ProfilingEventStats& stats = mapIter->second; + stats.m_TotalMs += event.DurationMs(); + stats.m_MinMs = std::min(stats.m_MinMs, event.DurationMs()); + stats.m_MaxMs = std::max(stats.m_MaxMs, event.DurationMs()); + ++stats.m_Count; + } + else + { + ProfilingEventStats stats; + stats.m_TotalMs = event.DurationMs(); + stats.m_MinMs = event.DurationMs(); + stats.m_MaxMs = event.DurationMs(); + stats.m_Count = 1; + + nameToStatsMap[event.m_Label] = stats; + } + } + + return nameToStatsMap; +} + +void Profiler::AnalyzeEventSequenceAndWriteResults(std::vector<ProfilingEvent>::const_iterator first, + std::vector<ProfilingEvent>::const_iterator last, + std::ostream& outStream) const +{ + // Output event sequence, if needed + if (g_WriteProfilingEventSequence) + { + // Make sure timestamps are output with 6 decimals, and save old settings + std::streamsize oldPrecision = outStream.precision(); + outStream.precision(6); + std::ios_base::fmtflags oldFlags = outStream.flags(); + outStream.setf(std::ios::fixed); + // Output fields + outStream << "Event Sequence - Name | Duration (ms) | Start (ms) | Stop (ms) | Device" << std::endl; + for (auto event = first; event != last; ++event) + { + std::chrono::duration<double, std::milli> startTimeMs = event->m_StartTime.time_since_epoch(); + std::chrono::duration<double, std::milli> stopTimeMs = event->m_StopTime.time_since_epoch(); + + outStream << std::setw(50) << event->m_Label << " " + << std::setw(20) 
<< event->DurationMs() + << std::setw(20) << startTimeMs.count() + << std::setw(20) << stopTimeMs.count() + << std::setw(20) << Profiler::Get().GetEventComputeDevice(event->m_Device) + << std::endl; + } + outStream << std::endl; + // Restore previous precision settings + outStream.flags(oldFlags); + outStream.precision(oldPrecision); + } + + // Aggregate results per event name + std::map<std::string, ProfilingEventStats> nameToStatsMap = CalculateProfilingEventStats(); + + // Output aggregated stats + outStream << "Event Stats - Name | Avg (ms) | Min (ms) | Max (ms) | Total (ms) | Count" << std::endl; + for (const auto& pair : nameToStatsMap) + { + const std::string& eventLabel = pair.first; + const ProfilingEventStats& eventStats = pair.second; + const double avgMs = eventStats.m_TotalMs / double(eventStats.m_Count); + + outStream << "\t" << std::setw(50) << eventLabel << " " << std::setw(9) << avgMs << " " + << std::setw(9) << eventStats.m_MinMs << " " << std::setw(9) << eventStats.m_MaxMs << " " + << std::setw(9) << eventStats.m_TotalMs << " " << std::setw(9) << eventStats.m_Count << std::endl; + } + outStream << std::endl; +} + +Profiler Profiler::s_Instance; + +Profiler::Profiler() + : m_EventTag(0) + , m_NestingLevel(0) + , m_EventTagUpdated(false) +{ + m_EventSequence.reserve(g_ProfilingEventCountHint); + +#if ARMNN_STREAMLINE_ENABLED + // Initialise streamline annotations + ANNOTATE_SETUP; +#endif +} + +Profiler::~Profiler() +{ + if (g_WriteReportToStdOutOnProfilerDestruction) + { + AnalyzeEventsAndWriteResults(std::cout); + } +} + +void Profiler::BeginEvent(Compute compute, const std::string label) +{ + // We need to sync just before the begin event to not include time before the period we want to time. + WaitForDevice(compute); + + const TimePoint timeStamp = Clock::now(); + m_ObservedMarkers.emplace(Marker{m_EventSequence.size(), label, timeStamp, compute, m_EventTag}); + m_EventSequence.emplace_back(); + +#if ARMNN_STREAMLINE_ENABLED + ANNOTATE_CHANNEL_COLOR(m_NestingLevel, GetEventColor(compute), label.c_str()); +#endif + + m_NestingLevel++; +} + +void Profiler::EndEvent(Compute compute) +{ + // We need to sync just before the end event to include all the time of the timed period. + WaitForDevice(compute); + + const Marker& marker = m_ObservedMarkers.top(); + + const TimePoint startTime = marker.m_TimeStamp; + const TimePoint stopTime = Clock::now(); + + m_EventSequence[marker.m_Id] = {std::move(marker.m_EventName), + startTime, + stopTime, + marker.m_ComputeDevice, + marker.m_Tag}; + + m_ObservedMarkers.pop(); + +#if ARMNN_STREAMLINE_ENABLED + ANNOTATE_CHANNEL_END(m_NestingLevel); +#endif + + m_NestingLevel--; +} + +void Profiler::AnalyzeEventsAndWriteResults(std::ostream& outStream) const +{ + // Stack should be empty now. + const bool saneMarkerSequence = m_ObservedMarkers.empty(); + + // Abort if the sequence of markers was found to have incorrect information: + // The stats cannot be trusted. + if (!saneMarkerSequence) + { + outStream << "Cannot write profiling stats. " + "Unexpected errors were found when analyzing the sequence of logged events, which may lead to plainly " + "wrong stats. The profiling system may contain implementation issues or could have been used in an " + "unsafe manner." 
<< std::endl; + return; + } + + // Analyze the full sequence of events + AnalyzeEventSequenceAndWriteResults(m_EventSequence.begin(), m_EventSequence.end(), outStream); + + // Aggregate events by tag if requested (spams the output stream if done for all tags) + if (m_EventTagUpdated && g_AggregateProfilingEventsByTag) + { + outStream << std::endl; + outStream << "***" << std::endl; + outStream << "*** Per Tag Stats" << std::endl; + outStream << "***" << std::endl; + outStream << std::endl; + + for (auto iter = m_EventSequence.begin(); iter != m_EventSequence.end();) + { + const uint32_t tag = iter->m_Tag; + + // Advance iter until we find the first non-matching tag + auto tagEndIter = iter; + for (; tagEndIter != m_EventSequence.end(); ++tagEndIter) + { + if (tagEndIter->m_Tag != tag) + { + break; + } + } + + outStream << "> Begin Tag: " << tag << std::endl; + outStream << std::endl; + AnalyzeEventSequenceAndWriteResults(iter, tagEndIter, outStream); + outStream << std::endl; + outStream << "> End Tag: " << tag << std::endl; + + iter = tagEndIter; + } + } +} + +void Profiler::WaitForDevice(Compute compute) const +{ +#if ARMCOMPUTECL_ENABLED + if(compute == Compute::GpuAcc && g_ProfilingForceGpuSync) + { + arm_compute::CLScheduler::get().sync(); + } +#endif +} + +const char* Profiler::GetEventComputeDevice(Compute compute) const +{ + switch(compute) + { + case Compute::CpuRef: + return "CpuRef"; + case Compute::CpuAcc: + return "CpuAcc"; + case Compute::GpuAcc: + return "GpuAcc"; + default: + return "Undefined"; + } +} + +std::uint32_t Profiler::GetEventColor(Compute compute) const +{ + switch(compute) + { + case Compute::CpuRef: + // Cyan + return 0xffff001b; + case Compute::CpuAcc: + // Green + return 0x00ff001b; + case Compute::GpuAcc: + // Purple + return 0xff007f1b; + default: + // Dark gray + return 0x5555551b; + } +} + +} // namespace armnn + +#endif // ARMNN_PROFILING_ENABLED + diff --git a/src/armnn/Profiling.hpp b/src/armnn/Profiling.hpp new file mode 100644 index 0000000000..88a7adff7c --- /dev/null +++ b/src/armnn/Profiling.hpp @@ -0,0 +1,159 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#if ARMNN_PROFILING_ENABLED + +#include "armnn/ArmNN.hpp" + +#include <chrono> +#include <iosfwd> +#include <ctime> +#include <vector> +#include <stack> +#include <map> + +namespace armnn +{ + +// Clock class that uses the same timestamp function as the Mali DDK +class monotonic_clock { +public: + using duration = std::chrono::nanoseconds; + using time_point = std::chrono::time_point<monotonic_clock, duration>; + + static std::chrono::time_point<monotonic_clock, std::chrono::nanoseconds> now() noexcept + { + timespec ts; +#if defined(CLOCK_MONOTONIC_RAW) + clock_gettime(CLOCK_MONOTONIC_RAW, &ts); +#else + clock_gettime(CLOCK_MONOTONIC, &ts); +#endif + return time_point(std::chrono::nanoseconds(ts.tv_sec*1000000000 + ts.tv_nsec)); + } +}; + +// Simple single-threaded profiler. +// Tracks events reported by BeginEvent()/EndEvent() and outputs detailed information and stats when +// Profiler::AnalyzeEventsAndWriteResults() is called. +class Profiler +{ +public: + // Marks the beginning of a user-defined event. + // No attempt will be made to copy the name string: It must be known at compile time. + void BeginEvent(Compute compute, const std::string name); + + // Marks the end of a user-defined event. 
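+    // Must be matched with an earlier BeginEvent(); markers are kept on a stack, so nested events are
+    // closed innermost-first.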
+ void EndEvent(Compute compute); + + // Increments the event tag, allowing grouping of events in a user-defined manner (e.g. per inference). + void UpdateEventTag() { ++m_EventTag; m_EventTagUpdated = true; } + + // Analyzes the tracked events and writes the results to the given output stream. + // Please refer to the configuration variables in Profiling.cpp to customize the information written. + void AnalyzeEventsAndWriteResults(std::ostream& outStream) const; + + // Accesses the singleton + static Profiler& Get() { return s_Instance; } + + // Gets a string name for a given Compute device enum + const char* GetEventComputeDevice(Compute compute) const; + + // Gets the color to render an event with, based on which device it denotes + std::uint32_t GetEventColor(Compute compute) const; + + typedef monotonic_clock Clock; + typedef std::chrono::time_point<Clock> TimePoint; + +private: + + struct Marker + { + std::size_t m_Id; + const std::string m_EventName; + TimePoint m_TimeStamp; + Compute m_ComputeDevice; + std::uint32_t m_Tag; + }; + + struct ProfilingEvent + { + std::string m_Label; + TimePoint m_StartTime; + TimePoint m_StopTime; + Compute m_Device; + std::uint32_t m_Tag; + + double DurationMs() const + { + return std::chrono::duration<double>(m_StopTime - m_StartTime).count()*1000.0; + } + }; + + struct ProfilingEventStats + { + double m_TotalMs; + double m_MinMs; + double m_MaxMs; + std::uint32_t m_Count; + }; + + Profiler(); + ~Profiler(); + + // Waits for a compute device to finish working to guarantee correct timings. + // Currently used exclusively when emitting profiling events denoting GPU work. + void WaitForDevice(Compute compute) const; + + void AnalyzeEventSequenceAndWriteResults(std::vector<ProfilingEvent>::const_iterator first, + std::vector<ProfilingEvent>::const_iterator last, + std::ostream& outStream) const; + + std::map<std::string, ProfilingEventStats> CalculateProfilingEventStats() const; + + std::stack<Marker> m_ObservedMarkers; + std::vector<ProfilingEvent> m_EventSequence; + std::uint32_t m_EventTag; + std::uint32_t m_NestingLevel; + bool m_EventTagUpdated; + + static Profiler s_Instance; +}; + +// Helper to easily add event markers to the codebase +class ScopedProfilingEvent +{ +public: + ScopedProfilingEvent(Compute compute, const std::string name) + : m_Compute(compute) + { + Profiler::Get().BeginEvent(compute, name); + } + + ~ScopedProfilingEvent() + { + Profiler::Get().EndEvent(m_Compute); + } + +private: + armnn::Compute m_Compute; +}; + +} // namespace armnn + +// Allows grouping events in an user-defined manner (e.g. per inference) +#define ARMNN_UPDATE_PROFILING_EVENT_TAG() armnn::Profiler::Get().UpdateEventTag(); + +// The event name must be known at compile time +#define ARMNN_SCOPED_PROFILING_EVENT(compute, name) armnn::ScopedProfilingEvent e_##__FILE__##__LINE__(compute, name); + +#else + +#define ARMNN_UPDATE_PROFILING_EVENT_TAG() +#define ARMNN_SCOPED_PROFILING_EVENT(compute, name) + +#endif // ARMNN_PROFILING_ENABLED + diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp new file mode 100644 index 0000000000..ea6d19bd31 --- /dev/null +++ b/src/armnn/Runtime.cpp @@ -0,0 +1,118 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "Runtime.hpp" + +#include "armnn/Version.hpp" + +#ifdef ARMCOMPUTECL_ENABLED +#include <arm_compute/core/CL/OpenCL.h> +#include <arm_compute/core/CL/CLKernelLibrary.h> +#endif + +#include <boost/log/trivial.hpp> +#include <boost/polymorphic_cast.hpp> + +using namespace armnn; +using namespace std; + +namespace armnn +{ + +IRuntime* IRuntime::CreateRaw(const CreationOptions& options) +{ + return new Runtime(options); +} + +IRuntimePtr IRuntime::Create(const CreationOptions& options) +{ + return IRuntimePtr(CreateRaw(options), &IRuntime::Destroy); +} + +void IRuntime::Destroy(IRuntime* runtime) +{ + delete boost::polymorphic_downcast<Runtime*>(runtime); +} + +int Runtime::GenerateNetworkId() +{ + return m_NetworkIdCounter++; +} + +Status Runtime::LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr inNetwork) +{ + IOptimizedNetwork* rawNetwork = inNetwork.release(); + unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork( + std::unique_ptr<OptimizedNetwork>(boost::polymorphic_downcast<OptimizedNetwork*>(rawNetwork)), + m_WorkloadFactories); + + if (!loadedNetwork) + { + return Status::Failure; + } + + networkIdOut = GenerateNetworkId(); + + // store the network + m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork); + + return Status::Success; + +} + +Status Runtime::UnloadNetwork(NetworkId networkId) +{ + if (m_LoadedNetworks.erase(networkId) == 0) + { + BOOST_LOG_TRIVIAL(warning) << "WARNING: Runtime::UnloadNetwork(): " << networkId << " not found!"; + return Status::Failure; + } +#ifdef ARMCOMPUTECL_ENABLED + arm_compute::CLKernelLibrary::get().clear_programs_cache(); +#endif + BOOST_LOG_TRIVIAL(debug) << "Runtime::UnloadNetwork(): Unloaded network with ID: " << networkId; + return Status::Success; +} + +Runtime::Runtime(const CreationOptions& options) +: m_NetworkIdCounter(0) +{ + BOOST_LOG_TRIVIAL(info) << "ArmNN v" << ARMNN_VERSION << "\n"; + BOOST_LOG_TRIVIAL(info) << "Using compute device: " << options.m_DefaultComputeDevice << "\n"; + m_DeviceSpec.DefaultComputeDevice = options.m_DefaultComputeDevice; + + // If useCpuRefAsFallback is false, the reference workload factory will be prevented from creating + // operation workloads, unless the default compute device is precisely the reference backend. + m_WorkloadFactories.m_CpuRef = make_shared<RefWorkloadFactory>( + options.m_DefaultComputeDevice == Compute::CpuRef ? 
true : options.m_UseCpuRefAsFallback); + m_WorkloadFactories.m_CpuAcc = make_shared<NeonWorkloadFactory>(); + m_WorkloadFactories.m_GpuAcc = make_shared<ClWorkloadFactory>(); + + if (options.m_DefaultComputeDevice == Compute::GpuAcc) + { + m_WorkloadFactories.m_GpuAcc.get()->LoadOpenClRuntime(options.m_ClTunedParameters); + } +} + +TensorInfo Runtime::GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const +{ + LoadedNetwork* net = m_LoadedNetworks.at(networkId).get(); + return net->GetInputTensorInfo(layerId); +} + +TensorInfo Runtime::GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const +{ + const LoadedNetwork* net = m_LoadedNetworks.at(networkId).get(); + return net->GetOutputTensorInfo(layerId); +} + +Status Runtime::EnqueueWorkload(NetworkId networkId, + const InputTensors& inputTensors, + const OutputTensors& outputTensors) +{ + LoadedNetwork* loadedNetwork = m_LoadedNetworks.at(networkId).get(); + return loadedNetwork->EnqueueWorkload(inputTensors, outputTensors, m_WorkloadFactories); +} + +} diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp new file mode 100644 index 0000000000..d3f3a578f3 --- /dev/null +++ b/src/armnn/Runtime.hpp @@ -0,0 +1,73 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "LoadedNetwork.hpp" +#include "armnn/INetwork.hpp" +#include "armnn/IRuntime.hpp" +#include "armnn/Tensor.hpp" +#include "backends/RefWorkloadFactory.hpp" +#include "backends/NeonWorkloadFactory.hpp" +#include "backends/ClWorkloadFactory.hpp" + +#include <unordered_map> + +namespace armnn +{ + +struct WorkloadFactories +{ + std::shared_ptr<RefWorkloadFactory> m_CpuRef; + std::shared_ptr<NeonWorkloadFactory> m_CpuAcc; + std::shared_ptr<ClWorkloadFactory> m_GpuAcc; +}; + +class Runtime final : public IRuntime +{ +public: + /// Load a complete network into the Runtime. + /// @param [out] networkIdOut Unique identifier for the network is returned in this reference. + /// @param [in] network Complete network to load into the Runtime. + /// The runtime takes ownership of the network once passed in. + /// @return armnn::Status + virtual Status LoadNetwork(NetworkId& networkIdOut, IOptimizedNetworkPtr network) override; + + virtual TensorInfo GetInputTensorInfo(NetworkId networkId, LayerBindingId layerId) const override; + virtual TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const override; + + // Evaluate network using input in inputTensors, outputs filled into outputTensors + virtual Status EnqueueWorkload(NetworkId networkId, + const InputTensors& inputTensors, + const OutputTensors& outputTensors) override; + + /// Unload a network from the Runtime. + /// At the moment this only removes the network from the m_Impl->m_Network. + /// This might need more work in the future to be AndroidNN compliant. + /// @param [in] networkId Unique identifier for the network to be unloaded. Generated in LoadNetwork(). + /// @return armnn::Status + virtual Status UnloadNetwork(NetworkId networkId) override; + + virtual const DeviceSpec& GetDeviceSpec() const override { return m_DeviceSpec; } + + /// Creates a runtime for workload execution. + /// May throw a ClRuntimeUnavailableException if @a defaultComputeDevice requires a CL runtime but + /// it cannot be setup for some reason. 
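+    /// @param [in] options Creation options, including the default compute device, whether CpuRef may
+    /// be used as a fallback for unsupported layers, and any tuned CL parameters to load.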
+ Runtime(const CreationOptions& options); + +private: + friend void RuntimeLoadedNetworksReserve(armnn::Runtime* runtime); // see RuntimeTests.cpp + + int GenerateNetworkId(); + + std::unordered_map<NetworkId, std::unique_ptr<LoadedNetwork>> m_LoadedNetworks; + + WorkloadFactories m_WorkloadFactories; + + int m_NetworkIdCounter; + + DeviceSpec m_DeviceSpec; +}; + +} diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp new file mode 100644 index 0000000000..2e04c8c617 --- /dev/null +++ b/src/armnn/Tensor.cpp @@ -0,0 +1,187 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "armnn/Tensor.hpp" +#include "armnn/Utils.hpp" +#include "armnn/Exceptions.hpp" +#include "armnn/TypesUtils.hpp" + +#include <boost/assert.hpp> +#include <boost/log/trivial.hpp> +#include <boost/numeric/conversion/cast.hpp> + +namespace armnn +{ + +// --- +// --- TensorShape +// --- + +TensorShape::TensorShape() + : m_NumDimensions(0) +{ +} + +TensorShape::TensorShape(const unsigned int numDimensions, const unsigned int* const dimensionSizes) + : m_NumDimensions(numDimensions) +{ + if (numDimensions < 1) + { + throw InvalidArgumentException("Tensor numDimensions must be greater than 0"); + } + + if (numDimensions > MaxNumOfTensorDimensions) + { + throw InvalidArgumentException("Tensor numDimensions must be less than or equal to MaxNumOfTensorDimensions"); + } + + if (dimensionSizes == nullptr) + { + throw InvalidArgumentException("Tensor dimensionSizes must not be NULL"); + } + + std::copy(dimensionSizes, dimensionSizes + numDimensions, m_Dimensions.begin()); +} + +TensorShape::TensorShape(std::initializer_list<unsigned int> dimensionSizeList) + : TensorShape(boost::numeric_cast<unsigned int>(dimensionSizeList.size()), dimensionSizeList.begin()) +{ +} + +TensorShape::TensorShape(const TensorShape& other) + : m_NumDimensions(other.m_NumDimensions) +{ + std::copy(other.m_Dimensions.cbegin(), other.m_Dimensions.cbegin() + other.m_NumDimensions, m_Dimensions.begin()); +} + +TensorShape& TensorShape::operator =(const TensorShape& other) +{ + m_NumDimensions = other.m_NumDimensions; + std::copy(other.m_Dimensions.cbegin(), other.m_Dimensions.cbegin() + other.m_NumDimensions, m_Dimensions.begin()); + return *this; +} + +bool TensorShape::operator==(const TensorShape& other) const +{ + return ((m_NumDimensions == other.m_NumDimensions) && + std::equal(m_Dimensions.cbegin(), m_Dimensions.cbegin() + m_NumDimensions, other.m_Dimensions.cbegin())); +} + +bool TensorShape::operator!=(const TensorShape& other) const +{ + return !(*this == other); +} + +unsigned int TensorShape::GetNumElements() const +{ + if (m_NumDimensions == 0) + { + return 0; + } + + unsigned int count = 1; + for (unsigned int i = 0; i < m_NumDimensions; i++) + { + count *= m_Dimensions[i]; + } + + return count; +} + +// --- +// --- TensorInfo +// --- + +TensorInfo::TensorInfo() +: m_DataType(DataType::Float32) +{ +} + +TensorInfo::TensorInfo(const TensorShape& shape, DataType dataType, + float quantizationScale, int32_t quantizationOffset) + : m_Shape(shape) + , m_DataType(dataType) +{ + m_Quantization.m_Scale = quantizationScale; + m_Quantization.m_Offset = quantizationOffset; +} + +TensorInfo::TensorInfo(unsigned int numDimensions, const unsigned int* dimensionSizes, DataType dataType, + float quantizationScale, int32_t quantizationOffset) + : m_Shape(numDimensions, dimensionSizes) + , m_DataType(dataType) +{ + m_Quantization.m_Scale = quantizationScale; + 
m_Quantization.m_Offset = quantizationOffset; +} + +TensorInfo::TensorInfo(const TensorInfo& other) +: m_Shape(other.m_Shape) +, m_DataType(other.m_DataType) +, m_Quantization(other.m_Quantization) +{ +} + +TensorInfo& TensorInfo::operator=(const TensorInfo& other) +{ + m_Shape = other.m_Shape; + m_DataType = other.m_DataType; + m_Quantization = other.m_Quantization; + return *this; +} + +bool TensorInfo::operator==(const TensorInfo& other) const +{ + return ((m_Shape == other.m_Shape) && + (m_DataType == other.m_DataType) && + (m_Quantization == other.m_Quantization)); +} + +bool TensorInfo::operator!=(const TensorInfo& other) const +{ + return !(*this == other); +} + +unsigned int TensorInfo::GetNumBytes() const +{ + return GetDataTypeSize(m_DataType) * GetNumElements(); +} + +// --- +// --- BaseTensor +// --- + +template<typename MemoryType> +BaseTensor<MemoryType>::BaseTensor() + : m_MemoryArea(nullptr) +{ +} + +template<typename MemoryType> +BaseTensor<MemoryType>::BaseTensor(const TensorInfo& info, MemoryType memoryArea) + : m_MemoryArea(memoryArea) + , m_Info(info) +{ +} + +template<typename MemoryType> +BaseTensor<MemoryType>::BaseTensor(const BaseTensor<MemoryType>& other) + : m_MemoryArea(other.m_MemoryArea) + , m_Info(other.GetInfo()) +{ +} + +template<typename MemoryType> +BaseTensor<MemoryType>& BaseTensor<MemoryType>::operator =(const BaseTensor<MemoryType>& other) +{ + m_Info = other.m_Info; + m_MemoryArea = other.m_MemoryArea; + return *this; +} + +// Explicit instantiations +template class BaseTensor<const void*>; +template class BaseTensor<void*>; + +} // namespace armnn diff --git a/src/armnn/Utils.cpp b/src/armnn/Utils.cpp new file mode 100644 index 0000000000..fb8f4d6f72 --- /dev/null +++ b/src/armnn/Utils.cpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "armnn/Utils.hpp" +#include "Logging.hpp" + +#include <boost/log/core.hpp> + +namespace armnn +{ + +void ConfigureLogging(bool printToStandardOutput, bool printToDebugOutput, LogSeverity severity) +{ + armnnUtils::ConfigureLogging(boost::log::core::get().get(), printToStandardOutput, printToDebugOutput, severity); +} + +// Default to logging completely disabled. +// The user of the library must enable it if they want by calling armnn::ConfigureLogging(). +struct DefaultLoggingConfiguration +{ + DefaultLoggingConfiguration() + { + ConfigureLogging(false, false, LogSeverity::Trace); + } +}; + +static DefaultLoggingConfiguration g_DefaultLoggingConfiguration; + +}
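+
+// A minimal usage sketch of the logging switch defined above, assuming a standalone client main();
+// the chosen severity is illustrative.
+//
+//     #include <armnn/Utils.hpp>
+//
+//     int main()
+//     {
+//         // Send Trace-level and more severe messages to standard output; leave the debug sink off.
+//         armnn::ConfigureLogging(true, false, armnn::LogSeverity::Trace);
+//         return 0;
+//     }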
\ No newline at end of file diff --git a/src/armnn/backends/ArmComputeTensorUtils.cpp b/src/armnn/backends/ArmComputeTensorUtils.cpp new file mode 100644 index 0000000000..9f21c41a2f --- /dev/null +++ b/src/armnn/backends/ArmComputeTensorUtils.cpp @@ -0,0 +1,131 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "ArmComputeTensorUtils.hpp" +#include "ArmComputeUtils.hpp" + +#include <armnn/Descriptors.hpp> + +namespace armnn +{ +namespace armcomputetensorutils +{ + +arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType) +{ + switch(dataType) + { + case armnn::DataType::Float32: + { + return arm_compute::DataType::F32; + } + case armnn::DataType::QuantisedAsymm8: + { + return arm_compute::DataType::QASYMM8; + } + case armnn::DataType::Signed32: + { + return arm_compute::DataType::S32; + } + default: + { + BOOST_ASSERT_MSG(false, "Unknown data type"); + return arm_compute::DataType::UNKNOWN; + } + } +} + +arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape) +{ + arm_compute::TensorShape shape; + + // armnn tensors are (batch, channels, height, width) + // arm_compute tensors are (width, height, channels, batch) + for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); i++) + { + // note that our dimensions are stored in the opposite order to ACL's + shape.set(tensorShape.GetNumDimensions() - i - 1, tensorShape[i]); + + // TensorShape::set() flattens leading ones, so that batch size 1 cannot happen. + // arm_compute tensors expect this + } + + // prevent arm_compute issue where tensor is flattened to nothing + if (shape.num_dimensions() == 0) + { + shape.set_num_dimensions(1); + } + + return shape; +} + +// Utility function used to build a TensorInfo object, that can be used to initialise +// ARM Compute Tensor and CLTensor allocators. 
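+// The returned arm_compute::TensorInfo combines the reversed-order shape from BuildArmComputeTensorShape()
+// with the mapped data type and the quantization scale/offset of the ArmNN tensor.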
+arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo) +{ + const arm_compute::TensorShape aclTensorShape = BuildArmComputeTensorShape(tensorInfo.GetShape()); + const arm_compute::DataType aclDataType = GetArmComputeDataType(tensorInfo.GetDataType()); + const arm_compute::QuantizationInfo aclQuantizationInfo(tensorInfo.GetQuantizationScale(), + tensorInfo.GetQuantizationOffset()); + + return arm_compute::TensorInfo(aclTensorShape, 1, aclDataType, aclQuantizationInfo); +} + +arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor) +{ + using arm_compute::PoolingType; + using arm_compute::DimensionRoundingType; + using arm_compute::PadStrideInfo; + using arm_compute::PoolingLayerInfo; + + // Resolve ARM Compute layer parameters + const PoolingType poolingType = ConvertPoolingAlgorithmToAclPoolingType(descriptor.m_PoolType); + const DimensionRoundingType rounding = ConvertOutputShapeRoundingToAclDimensionRoundingType( + descriptor.m_OutputShapeRounding); + + const PadStrideInfo padStrideInfo(descriptor.m_StrideX, + descriptor.m_StrideY, + descriptor.m_PadLeft, + descriptor.m_PadRight, + descriptor.m_PadTop, + descriptor.m_PadBottom, + rounding); + + const bool excludePadding = (descriptor.m_PaddingMethod == PaddingMethod::Exclude); + + return arm_compute::PoolingLayerInfo(poolingType, descriptor.m_PoolWidth, padStrideInfo, excludePadding); +} + +arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& descriptor) +{ + const arm_compute::NormType normType = + ConvertNormalizationAlgorithmChannelToAclNormType(descriptor.m_NormChannelType); + return arm_compute::NormalizationLayerInfo(normType, + descriptor.m_NormSize, + descriptor.m_Alpha, + descriptor.m_Beta, + descriptor.m_K, + false); +} + +arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& perm) +{ + arm_compute::PermutationVector aclPerm; + + unsigned int start = 0; + while ((start == perm[start]) && (start < perm.GetSize())) + { + ++start; + } + + for (unsigned int i = start; i < perm.GetSize(); ++i) + { + aclPerm.set(i - start, perm[i] - start); + } + + return aclPerm; +} + +} // namespace armcomputetensorutils +} // namespace armnn diff --git a/src/armnn/backends/ArmComputeTensorUtils.hpp b/src/armnn/backends/ArmComputeTensorUtils.hpp new file mode 100644 index 0000000000..9a13caf495 --- /dev/null +++ b/src/armnn/backends/ArmComputeTensorUtils.hpp @@ -0,0 +1,146 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include <armnn/Tensor.hpp> +#include <armnn/DescriptorsFwd.hpp> + +#include <arm_compute/core/ITensor.h> +#include <arm_compute/core/TensorInfo.h> + +#include <boost/cast.hpp> + +namespace armnn +{ +class ITensorHandle; + +namespace armcomputetensorutils +{ + +/// Utility function to map an armnn::DataType to corresponding arm_compute::DataType +arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType); + +/// Utility function used to setup an arm_compute::TensorShape object from an armnn::TensorShape +arm_compute::TensorShape BuildArmComputeTensorShape(const armnn::TensorShape& tensorShape); + +/// Utility function used to setup an arm_compute::ITensorInfo object whose dimensions are based on the given +/// armnn::ITensorInfo +arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo); + +/// Utility function used to setup an arm_compute::PoolingLayerInfo object from an armnn::Pooling2dDescriptor +arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(const Pooling2dDescriptor& descriptor); + +/// Utility function to setup an arm_compute::NormalizationLayerInfo object from an armnn::NormalizationDescriptor +arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(const NormalizationDescriptor& desc); + +/// Utility function used to setup an arm_compute::PermutationVector object from an armnn::PermutationVector +arm_compute::PermutationVector BuildArmComputePermutationVector(const armnn::PermutationVector& vector); + +/// Sets up the given ArmCompute tensor's dimensions based on the given ArmNN tensor. +template <typename Tensor> +void BuildArmComputeTensor(Tensor& tensor, const armnn::TensorInfo& tensorInfo) +{ + tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo)); +} + +template <typename Tensor> +void InitialiseArmComputeTensorEmpty(Tensor& tensor) +{ + tensor.allocator()->allocate(); +} + +// Helper function to obtain byte offset into tensor data +inline size_t GetTensorOffset(const arm_compute::ITensorInfo& info, + uint32_t batchIndex, + uint32_t channelIndex, + uint32_t y, + uint32_t x) +{ + arm_compute::Coordinates coords; + coords.set(3, boost::numeric_cast<int>(batchIndex)); + coords.set(2, boost::numeric_cast<int>(channelIndex)); + coords.set(1, boost::numeric_cast<int>(y)); + coords.set(0, boost::numeric_cast<int>(x)); + return info.offset_element_in_bytes(coords); +} + +// Helper function to obtain element offset into data buffer representing tensor data (assuming no strides) +inline size_t GetLinearBufferOffset(const arm_compute::ITensorInfo& info, + uint32_t batchIndex, + uint32_t channelIndex, + uint32_t y, + uint32_t x) +{ + const arm_compute::TensorShape& shape = info.tensor_shape(); + uint32_t width = boost::numeric_cast<uint32_t>(shape[0]); + uint32_t height = boost::numeric_cast<uint32_t>(shape[1]); + uint32_t numChannels = boost::numeric_cast<uint32_t>(shape[2]); + return ((batchIndex * numChannels + channelIndex) * height + y) * width + x; +} + +template <typename T> +void CopyArmComputeITensorData(const arm_compute::ITensor& srcTensor, T* dstData) +{ + // if MaxNumOfTensorDimensions is increased, this loop will need fixing + static_assert(MaxNumOfTensorDimensions == 4, "Please update CopyArmComputeITensorData"); + { + const arm_compute::ITensorInfo& info = *srcTensor.info(); + const arm_compute::TensorShape& shape = info.tensor_shape(); + const uint8_t* const bufferPtr = srcTensor.buffer(); + uint32_t width = boost::numeric_cast<uint32_t>(shape[0]); + uint32_t height = 
boost::numeric_cast<uint32_t>(shape[1]); + uint32_t numChannels = boost::numeric_cast<uint32_t>(shape[2]); + uint32_t numBatches = boost::numeric_cast<uint32_t>(shape[3]); + + for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex) + { + for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex) + { + for (unsigned int y = 0; y < height; ++y) + { + // Copy one row from arm_compute tensor buffer to linear memory buffer + // A row is the largest contiguous region we can copy, as the tensor data may be using strides + memcpy(dstData + GetLinearBufferOffset(info, batchIndex, channelIndex, y, 0), + bufferPtr + GetTensorOffset(info, batchIndex, channelIndex, y, 0), + width * sizeof(T)); + } + } + } + } +} + +template <typename T> +void CopyArmComputeITensorData(const T* srcData, arm_compute::ITensor& dstTensor) +{ + // if MaxNumOfTensorDimensions is increased, this loop will need fixing + static_assert(MaxNumOfTensorDimensions == 4, "Please update CopyArmComputeITensorData"); + { + const arm_compute::ITensorInfo& info = *dstTensor.info(); + const arm_compute::TensorShape& shape = info.tensor_shape(); + uint8_t* const bufferPtr = dstTensor.buffer(); + uint32_t width = boost::numeric_cast<uint32_t>(shape[0]); + uint32_t height = boost::numeric_cast<uint32_t>(shape[1]); + uint32_t numChannels = boost::numeric_cast<uint32_t>(shape[2]); + uint32_t numBatches = boost::numeric_cast<uint32_t>(shape[3]); + + for (unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex) + { + for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex) + { + for (unsigned int y = 0; y < height; ++y) + { + // Copy one row from linear memory buffer to arm_compute tensor buffer + // A row is the largest contiguous region we can copy, as the tensor data may be using strides + memcpy(bufferPtr + GetTensorOffset(info, batchIndex, channelIndex, y, 0), + srcData + GetLinearBufferOffset(info, batchIndex, channelIndex, y, 0), + width * sizeof(T)); + } + } + } + } +} + +} // namespace armcomputetensorutils +} // namespace armnn diff --git a/src/armnn/backends/ArmComputeUtils.hpp b/src/armnn/backends/ArmComputeUtils.hpp new file mode 100644 index 0000000000..c451e6434b --- /dev/null +++ b/src/armnn/backends/ArmComputeUtils.hpp @@ -0,0 +1,117 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#if ARMCOMPUTENEON_ENABLED || ARMCOMPUTECL_ENABLED + +#include <armnn/Tensor.hpp> +#include <armnn/Descriptors.hpp> + +#include <arm_compute/core/Types.h> + +namespace armnn +{ + +inline arm_compute::NormalizationLayerInfo +CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo) +{ + const unsigned int depth = tensorInfo.GetShape()[1]; + + // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of + // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose + // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully chosing the normalization + // parameters. + // + // Please refer to both the reference implementation of the normalization layer and the implementation of + // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below. + + // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd. 
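+    // Choosing normSize = 2 * depth + 1 (set just below) satisfies both requirements: it is always odd
+    // and spans every channel.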
+ // CL: This does not result in extra kernel threads not doing any work: See usage of the RADIUS parameter in + // ACL's normalization_layer_cross_map() CL function. + const uint32_t normSize = depth * 2u + 1u; + + // See ACL's NormalizationLayerInfo::scale_coeff() definition. + // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead. + const float alpha = 1.0f; + + // Don't offset the reduction + const float kappa = 0.0f; + + // pow(reduction, -0.5) = 1 / sqrt(reduction) + const float beta = 0.5f; + + return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false); +} + +inline arm_compute::ActivationLayerInfo::ActivationFunction +ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction) +{ + using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction; + + switch (armnnFunction) + { + case ActivationFunction::Linear: return AclActivationFunction::LINEAR; + // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function + case ActivationFunction::Sigmoid: return AclActivationFunction::LOGISTIC; + case ActivationFunction::ReLu: return AclActivationFunction::RELU; + case ActivationFunction::BoundedReLu: return AclActivationFunction::LU_BOUNDED_RELU; + case ActivationFunction::SoftReLu: return AclActivationFunction::SOFT_RELU; + case ActivationFunction::LeakyReLu: return AclActivationFunction::LEAKY_RELU; + case ActivationFunction::Abs: return AclActivationFunction::ABS; + case ActivationFunction::Sqrt: return AclActivationFunction::SQRT; + case ActivationFunction::Square: return AclActivationFunction::SQUARE; + case ActivationFunction::TanH: return AclActivationFunction::TANH; + default: throw InvalidArgumentException("Unsupported activation function"); + } +} + +inline arm_compute::ActivationLayerInfo +ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc) +{ + return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function), + actDesc.m_A, actDesc.m_B); +} + +inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm) +{ + using arm_compute::PoolingType; + + switch (poolingAlgorithm) + { + case PoolingAlgorithm::Max: return PoolingType::MAX; + case PoolingAlgorithm::Average: return PoolingType::AVG; + case PoolingAlgorithm::L2: return PoolingType::L2; + default: throw InvalidArgumentException("Unsupported pooling algorithm"); + } +} + +inline arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding + rounding) +{ + using arm_compute::DimensionRoundingType; + + switch (rounding) + { + case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL; + case OutputShapeRounding::Floor: return DimensionRoundingType::FLOOR; + default: throw InvalidArgumentException("Unsupported Output Shape Rounding type"); + } +} + +inline arm_compute::NormType +ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType) +{ + using arm_compute::NormType; + switch (channelType) + { + case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP; + case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D; + default: throw InvalidArgumentException("Unsupported normalization algorithm channel type"); + } +} + +} + +#endif // ARMCOMPUTENEON_ENABLED || ARMCOMPUTECL_ENABLED diff --git 
a/src/armnn/backends/ClLayerSupport.cpp b/src/armnn/backends/ClLayerSupport.cpp new file mode 100644 index 0000000000..5f0e4ea622 --- /dev/null +++ b/src/armnn/backends/ClLayerSupport.cpp @@ -0,0 +1,405 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "LayerSupportCommon.hpp" + +#include "ClLayerSupport.hpp" +#include "InternalTypes.hpp" + +#include <armnn/Descriptors.hpp> +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> + +#include <boost/core/ignore_unused.hpp> + +#ifdef ARMCOMPUTECL_ENABLED +#include "ClWorkloads/ClAdditionFloat32Workload.hpp" +#include "ClWorkloads/ClPooling2dBaseWorkload.hpp" +#include "ClWorkloads/ClPermuteWorkload.hpp" +#include "ClWorkloads/ClNormalizationFloat32Workload.hpp" +#endif + +using namespace boost; + +namespace armnn +{ +namespace +{ +template<unsigned int FilterSize> +bool IsMatchingSize2d(const TensorInfo& weightInfo) +{ + // Width & Height must match + return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize); +} + +template<uint32_t ValidStride> +bool IsMatchingStride(uint32_t actualStride) +{ + return ValidStride == actualStride; +} + +template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides> +bool IsMatchingStride(uint32_t actualStride) +{ + return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride); +}; + +bool IsClBackendSupported(std::string* reasonIfUnsupported) +{ +#if ARMCOMPUTECL_ENABLED + return true; +#else + if (reasonIfUnsupported != nullptr) + { + *reasonIfUnsupported = "The armnn library has been built without CL support"; + } + return false; +#endif +} + +#if ARMCOMPUTECL_ENABLED +#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr) +#else +#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported) +#endif + +#if ARMCOMPUTECL_ENABLED +template<class FuncType, class... Args> +inline bool IsWorkloadSupported(FuncType&& func, std::string* reasonIfUnsupported, Args&&... args) +{ + arm_compute::Status aclStatus = func(std::forward<Args>(args)...); + const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK); + if (!supported && reasonIfUnsupported) + { + *reasonIfUnsupported = aclStatus.error_description(); + } + return supported; +} + +#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \ + return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__); +#else +#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) 
\ + return IsClBackendSupported(reasonIfUnsupported); +#endif + +} //namespace + +bool IsClActivationUint8Supported(std::string* reasonIfUnsupported, const ActivationDescriptor& parameters) +{ + if (parameters.m_Function != ActivationFunction::BoundedReLu) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Unsupported activation function, only BoundedReLu is supported"; + } + + return false; + } + + return true; +} + +bool IsClDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported, + const DepthwiseConvolution2dDescriptor& parameters, + const TensorInfo& weights) +{ + if (weights.GetNumDimensions() != 4) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Depthwise convolution weight tensor needs to be 4d"; + } + return false; + } + // weights.GetShape()[0] = channel multiplier + if (weights.GetShape()[0] != 1) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Channel multiplier only supports the value 1 in the CL backend"; + } + return false; + } + else if ((weights.GetDataType() == armnn::DataType::QuantisedAsymm8) && !IsMatchingSize2d<3>(weights)) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "CL backend only supports 3x3 filtering for Depthwise Convolution on 8-bit"; + } + return false; + } + + return true; +} + +template<typename Float32Func, typename Uint8Func, typename ... Params> +bool IsSupportedForDataTypeCl(std::string* reasonIfUnsupported, + DataType dataType, + Float32Func floatFuncPtr, + Uint8Func uint8FuncPtr, + Params&&... params) +{ + return IsClBackendSupported(reasonIfUnsupported) && + IsSupportedForDataTypeGeneric(reasonIfUnsupported, + dataType, + floatFuncPtr, + uint8FuncPtr, + std::forward<Params>(params)...); +} + +bool IsActivationSupportedCl(const TensorInfo& input, + const ActivationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<const ActivationDescriptor&>, + &IsClActivationUint8Supported, + descriptor); +} + +bool IsAdditionSupportedCl(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + return FORWARD_CL_LAYER_SUPPORT_FUNC(ClAdditionFloat32Workload::IsSupported(input0, + input1, + output, + reasonIfUnsupported)); +} + +bool IsBatchNormalizationSupportedCl(const TensorInfo& input, + const BatchNormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<const BatchNormalizationDescriptor&>, + &FalseFuncU8<const BatchNormalizationDescriptor&>, + descriptor); +} + +bool IsConstantSupportedCl(const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeCl(reasonIfUnsupported, + output.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc) +{ + bool isSupported = false; + + bool strideXIsOneOrTwo = IsMatchingStride<1, 2>(desc.m_StrideX); + bool strideXIsThree = IsMatchingStride<3>(desc.m_StrideX); + + bool strideYIsOneOrTwo = IsMatchingStride<1, 2>(desc.m_StrideY); + bool strideYIsThree = IsMatchingStride<3>(desc.m_StrideY); + + bool strideIsOneOrTwo = strideXIsOneOrTwo && strideYIsOneOrTwo; + bool strideIsOneOrTwoOrThree = ( strideXIsOneOrTwo || strideXIsThree ) && ( strideYIsOneOrTwo || strideYIsThree ); + + // 1x1 convolution with strides of 1,2,3 + isSupported |= 
IsMatchingSize2d<1>(weightInfo) && ( strideIsOneOrTwoOrThree ); + + // 3x3 convolution with strides of 1,2 + isSupported |= IsMatchingSize2d<3>(weightInfo) && ( strideIsOneOrTwo ); + + // 5x5 convolution with strides of 1,2 + isSupported |= IsMatchingSize2d<5>(weightInfo) && ( strideIsOneOrTwo ); + + //fall back to normal convolution for the asymmetric padding case. + if (desc.m_PadLeft != desc.m_PadRight || + desc.m_PadTop != desc.m_PadBottom) + { + //direct convolution does not support asymmetric padding yet. + isSupported = false; + } + + return isSupported; +} + +bool IsDirectConvolution2dParamsSupportedCl(std::string* reasonIfUnsupported, + const Convolution2dDescriptor& parameters, + const TensorInfo& weightInfo) +{ + return IsClDirectConvolution2dSupported(weightInfo, parameters); +} + +bool IsConvolution2dSupportedCl(const TensorInfo& input, + const Convolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<decltype(descriptor), decltype(weights)>, + &IsDirectConvolution2dParamsSupportedCl, + descriptor, + weights); +} + +bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &IsClDepthwiseConvolution2dDescParamsSupported, + &IsClDepthwiseConvolution2dDescParamsSupported, + descriptor, + weights); +} + +bool IsFullyConnectedSupportedCl(const TensorInfo& input, + const FullyConnectedDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsInputSupportedCl(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsL2NormalizationSupportedCl(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs, + const OriginsDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + inputs[0]->GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsMultiplicationSupportedCl(const TensorInfo& input0, + const TensorInfo& input1, + std::string* reasonIfUnsupported) +{ + ignore_unused(input1); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input0.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsNormalizationSupportedCl(const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor); +} + +bool IsOutputSupportedCl(const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeCl(reasonIfUnsupported, + output.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsPermuteSupportedCl(const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(input); + 
ignore_unused(output); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor); +} + +bool IsPooling2dSupportedCl(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor); +} + +bool IsResizeBilinearSupportedCl(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsSoftmaxSupportedCl(const TensorInfo& input, + const SoftmaxDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsSplitterSupportedCl(const TensorInfo& input, + const ViewsDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsFakeQuantizationSupportedCl(const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(input); + ignore_unused(descriptor); + return false; +} + +bool IsReshapeSupportedCl(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + ignore_unused(input); + return true; +} + +bool IsFloorSupportedCl(const TensorInfo& input, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + ignore_unused(output); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +} diff --git a/src/armnn/backends/ClLayerSupport.hpp b/src/armnn/backends/ClLayerSupport.hpp new file mode 100644 index 0000000000..f5b5ae8b15 --- /dev/null +++ b/src/armnn/backends/ClLayerSupport.hpp @@ -0,0 +1,102 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include <armnn/DescriptorsFwd.hpp> +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> + +namespace armnn +{ +bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc); +bool IsClActivationUint8Supported(std::string* reasonIfUnsupported, const ActivationDescriptor& parameters); +bool IsClDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported, + const DepthwiseConvolution2dDescriptor& parameters, + const TensorInfo& weights); + +bool IsActivationSupportedCl(const TensorInfo& input, + const ActivationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsAdditionSupportedCl(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +bool IsBatchNormalizationSupportedCl(const TensorInfo& input, + const BatchNormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsConstantSupportedCl(const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +bool IsConvolution2dSupportedCl(const TensorInfo& input, + const Convolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported = nullptr); + +bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported = nullptr); + +bool IsFullyConnectedSupportedCl(const TensorInfo& input, + const FullyConnectedDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsInputSupportedCl(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsL2NormalizationSupportedCl(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs, + const OriginsDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsMultiplicationSupportedCl(const TensorInfo& input0, + const TensorInfo& input1, + std::string* reasonIfUnsupported = nullptr); + +bool IsNormalizationSupportedCl(const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsOutputSupportedCl(const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +bool IsPermuteSupportedCl(const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsPooling2dSupportedCl(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsResizeBilinearSupportedCl(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsSoftmaxSupportedCl(const TensorInfo& input, + const SoftmaxDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsSplitterSupportedCl(const TensorInfo& input, + const ViewsDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsFakeQuantizationSupportedCl(const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsReshapeSupportedCl(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsFloorSupportedCl(const TensorInfo& input, + const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); +} diff --git 
a/src/armnn/backends/ClTensorHandle.hpp b/src/armnn/backends/ClTensorHandle.hpp new file mode 100644 index 0000000000..49e18dad59 --- /dev/null +++ b/src/armnn/backends/ClTensorHandle.hpp @@ -0,0 +1,86 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "OutputHandler.hpp" +#include "ArmComputeTensorUtils.hpp" + +#include <arm_compute/runtime/CL/CLTensor.h> +#include <arm_compute/runtime/CL/CLSubTensor.h> +#include <arm_compute/core/TensorShape.h> +#include <arm_compute/core/Coordinates.h> + + +namespace armnn +{ + + +class IClTensorHandle : public ITensorHandle +{ +public: + virtual arm_compute::ICLTensor& GetTensor() = 0; + virtual arm_compute::ICLTensor const& GetTensor() const = 0; + virtual void Map(bool blocking = true) = 0; + virtual void UnMap() = 0; + virtual arm_compute::DataType GetDataType() const = 0; +}; + +class ClTensorHandle : public IClTensorHandle +{ +public: + ClTensorHandle(const TensorInfo& tensorInfo) + { + armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo); + } + + arm_compute::CLTensor& GetTensor() override { return m_Tensor; } + arm_compute::CLTensor const& GetTensor() const override { return m_Tensor; } + virtual void Allocate() override {armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor);}; + + virtual void Map(bool blocking = true) override {m_Tensor.map(blocking);} + virtual void UnMap() override { m_Tensor.unmap();} + + virtual ITensorHandle::Type GetType() const override { return ITensorHandle::CL;} + + virtual arm_compute::DataType GetDataType() const override + { + return m_Tensor.info()->data_type(); + } + +private: + arm_compute::CLTensor m_Tensor; + +}; + +class ClSubTensorHandle : public IClTensorHandle +{ +public: + ClSubTensorHandle(arm_compute::ICLTensor& parent, + const arm_compute::TensorShape& shape, + const arm_compute::Coordinates& coords) + : m_Tensor(&parent, shape, coords) + { + } + + arm_compute::CLSubTensor& GetTensor() override { return m_Tensor; } + arm_compute::CLSubTensor const& GetTensor() const override { return m_Tensor; } + virtual void Allocate() override {}; + + virtual void Map(bool blocking = true) override {m_Tensor.map(blocking);} + virtual void UnMap() override { m_Tensor.unmap();} + + virtual ITensorHandle::Type GetType() const override { return ITensorHandle::CL;} + + virtual arm_compute::DataType GetDataType() const override + { + return m_Tensor.info()->data_type(); + } + +private: + arm_compute::CLSubTensor m_Tensor; + +}; + +}
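
The copy helpers earlier in this patch (CopyArmComputeITensorData and its counterpart) move data one row at a time because an arm_compute tensor may carry padding, so a row is the largest region guaranteed to be contiguous. The following is only a simplified, self-contained sketch of that idea; RowMajorStrides and CopyStridedToLinear are stand-in names, not part of the armnn or arm_compute APIs.

#include <cstdint>
#include <cstring>

// Hypothetical byte strides for (x, y, channel, batch), standing in for the
// strides reported by arm_compute::ITensorInfo.
struct RowMajorStrides
{
    size_t x, y, c, n;
};

// Copy a (possibly padded) strided NCHW tensor into a tightly packed linear
// buffer, one row at a time: the innermost W elements are the largest region
// guaranteed to be contiguous when the source has padding between rows.
template <typename T>
void CopyStridedToLinear(const uint8_t* src, const RowMajorStrides& strides,
                         unsigned int width, unsigned int height,
                         unsigned int channels, unsigned int batches,
                         T* dst)
{
    for (unsigned int n = 0; n < batches; ++n)
        for (unsigned int c = 0; c < channels; ++c)
            for (unsigned int y = 0; y < height; ++y)
            {
                const uint8_t* srcRow = src + n * strides.n + c * strides.c + y * strides.y;
                T* dstRow = dst + ((n * channels + c) * height + y) * width; // packed offset
                std::memcpy(dstRow, srcRow, width * sizeof(T));
            }
}
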
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloadFactory.cpp b/src/armnn/backends/ClWorkloadFactory.cpp new file mode 100644 index 0000000000..4e565a05d7 --- /dev/null +++ b/src/armnn/backends/ClWorkloadFactory.cpp @@ -0,0 +1,473 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "ClWorkloadFactory.hpp" + +#include "armnn/Exceptions.hpp" +#include "armnn/Utils.hpp" + +#include <string> +#include "CpuTensorHandle.hpp" +#include "Layer.hpp" +#include "Layers.hpp" + +#ifdef ARMCOMPUTECL_ENABLED +#include <arm_compute/core/CL/CLKernelLibrary.h> +#include <arm_compute/runtime/CL/CLScheduler.h> +#include "backends/MemCopyWorkload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "ClWorkloads.hpp" +#endif + +#include "MakeWorkloadHelper.hpp" + +#include <boost/polymorphic_cast.hpp> +#include <boost/format.hpp> + +namespace armnn +{ + +bool ClWorkloadFactory::IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported) +{ + return IWorkloadFactory::IsLayerSupported(Compute::GpuAcc, layer, dataType, outReasonIfUnsupported); +} + +#ifdef ARMCOMPUTECL_ENABLED + +void ClWorkloadFactory::LoadOpenClRuntime(IClTunedParameters* clTunedParameters) +{ + ClTunedParameters* clTunedParametersImpl = boost::polymorphic_downcast<ClTunedParameters*>(clTunedParameters); + + cl::Device device; + cl::Context context; + cl::CommandQueue commandQueue; + + try + { + device = cl::Device::getDefault(); + context = cl::Context::getDefault(); + + bool enableProfiling = false; +#if ARMNN_PROFILING_ENABLED + enableProfiling = true; +#endif + if (clTunedParametersImpl && clTunedParametersImpl->m_Mode == IClTunedParameters::Mode::UpdateTunedParameters) + { + enableProfiling = true; // Needed for the CLTuner to work. + } + + if (enableProfiling) + { + // Create a new queue with profiling enabled + commandQueue = cl::CommandQueue(context, device, CL_QUEUE_PROFILING_ENABLE); + } + else + { + // Use default queue + commandQueue = cl::CommandQueue::getDefault(); + } + } + catch (const cl::Error& clError) + { + throw ClRuntimeUnavailableException(boost::str(boost::format( + "Could not initialize the CL runtime. Error description: %1%. CL error code: %2%" + ) % clError.what() % clError.err())); + } + + // Note: the first argument (the path to the CL source code) is ignored, as the kernels are embedded in the arm_compute library. 
+ arm_compute::CLKernelLibrary::get().init(".", context, device); + + arm_compute::ICLTuner* tuner = nullptr; + if (clTunedParameters) + { + tuner = &clTunedParametersImpl->m_Tuner; + } + arm_compute::CLScheduler::get().init(context, commandQueue, device, tuner); +} + +std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const +{ + return std::make_unique<ClTensorHandle>(tensorInfo); +} + +std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent, + TensorShape const& subTensorShape, + unsigned int const* subTensorOrigin) const +{ + BOOST_ASSERT(parent.GetType() == ITensorHandle::CL); + + arm_compute::Coordinates coords; + arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape); + + coords.set_num_dimensions(subTensorShape.GetNumDimensions()); + for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++) + { + // arm compute indexes tensor coords in reverse order + unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1; + coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex])); + } + + return std::make_unique<ClSubTensorHandle>(static_cast<ClTensorHandle&>(parent).GetTensor(), shape, coords); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<CopyFromCpuToClFloat32Workload, CopyFromCpuToClUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<CopyFromClToCpuFloat32Workload, CopyFromClToCpuUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClActivationFloat32Workload, ClActivationUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClSoftmaxFloat32Workload, ClSoftmaxUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClSplitterFloat32Workload, ClSplitterUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClMergerFloat32Workload, ClMergerUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateFullyConnected( + const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<ClFullyConnectedFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClPermuteFloat32Workload, ClPermuteUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClPooling2dFloat32Workload, ClPooling2dUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> 
ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClConvolution2dFloat32Workload, ClConvolution2dUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d( + const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<ClDepthwiseConvolutionFloat32Workload, ClDepthwiseConvolutionUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClNormalizationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClAdditionFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMultiplication( + const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<ClMultiplicationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateBatchNormalization( + const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<ClBatchNormalizationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0]) + { + throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload"); + } + + // Create a workload that will copy tensor data from the inputs, which can have a number of different formats, + // to CL tensors. 
+ switch (descriptor.m_Inputs[0]->GetType()) + { + case ITensorHandle::Cpu: + return MakeWorkload<CopyFromCpuToClFloat32Workload, CopyFromCpuToClUint8Workload>(descriptor, info); +#if ARMCOMPUTENEON_ENABLED + case ITensorHandle::Neon: + { + return MakeWorkload<CopyFromNeonToClFloat32Workload, CopyFromNeonToClUint8Workload>(descriptor, info); + } +#endif + default: + throw InvalidArgumentException("ClWorkloadFactory: Destination type not supported for MemCopy Workload."); + } +} + +std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateResizeBilinear( + const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClResizeBilinearFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFakeQuantization( + const FakeQuantizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClL2NormalizationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClConstantFloat32Workload, ClConstantUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClReshapeFloat32Workload, ClReshapeUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<ClFloorFloat32Workload, NullWorkload>(descriptor, info); +} + +#else // #if ARMCOMPUTECL_ENABLED + +void ClWorkloadFactory::LoadOpenClRuntime(IClTunedParameters* clTunedParameters) +{ + // No CL support +} + +std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const +{ + return nullptr; +} + +std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent, + TensorShape const& subTensorShape, + unsigned int const* subTensorOrigin) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<armnn::IWorkload> 
ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d( + const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchNormalization( + const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +#endif // #if ARMCOMPUTECL_ENABLED + +armnn::IClTunedParameters* IClTunedParameters::CreateRaw(armnn::IClTunedParameters::Mode mode) +{ + return new ClTunedParameters(mode); +} + +armnn::IClTunedParametersPtr IClTunedParameters::Create(armnn::IClTunedParameters::Mode mode) +{ + return IClTunedParametersPtr(CreateRaw(mode), &IClTunedParameters::Destroy); +} + +void IClTunedParameters::Destroy(IClTunedParameters* params) +{ + delete params; +} + +ClTunedParameters::ClTunedParameters(armnn::IClTunedParameters::Mode mode) + : m_Mode(mode) +#ifdef ARMCOMPUTECL_ENABLED + , m_Tuner(mode == ClTunedParameters::Mode::UpdateTunedParameters) +#endif +{ +} + +void ClTunedParameters::Load(const char* filename) +{ +#ifdef ARMCOMPUTECL_ENABLED + try + { + m_Tuner.load_from_file(filename); + } + catch (const std::exception& e) + { + throw armnn::Exception(std::string("Failed to load tuned parameters file '") + filename + "': " + + e.what()); + } +#endif +} + +void ClTunedParameters::Save(const 
char* filename) const +{ +#ifdef ARMCOMPUTECL_ENABLED + try + { + m_Tuner.save_to_file(filename); + } + catch (const std::exception& e) + { + throw armnn::Exception(std::string("Failed to save tuned parameters file to '") + filename + "': " + + e.what()); + } +#endif +} + +} // namespace armnn diff --git a/src/armnn/backends/ClWorkloadFactory.hpp b/src/armnn/backends/ClWorkloadFactory.hpp new file mode 100644 index 0000000000..2477e23eeb --- /dev/null +++ b/src/armnn/backends/ClWorkloadFactory.hpp @@ -0,0 +1,129 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "WorkloadFactory.hpp" +#include "OutputHandler.hpp" +#include "armnn/IRuntime.hpp" + +#ifdef ARMCOMPUTECL_ENABLED +#include <arm_compute/runtime/CL/CLTuner.h> +#endif + +namespace cl +{ +class Context; +class CommandQueue; +class Device; +} + +namespace armnn +{ + +class IClTunedParameters; + +// ARM Compute OpenCL workload factory +class ClWorkloadFactory : public IWorkloadFactory +{ +public: + virtual ~ClWorkloadFactory(){}; + + virtual Compute GetCompute() const override { return Compute::GpuAcc; } + + static bool IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported); + + void LoadOpenClRuntime(IClTunedParameters* clTunedParameters = nullptr); + + virtual bool SupportsSubTensors() const override { return true; } + + virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent, + TensorShape const& subTensorShape, + unsigned int const* subTensorOrigin) const override; + + virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override; + + virtual std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d( + const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& 
descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; +}; + +class ClTunedParameters : public IClTunedParameters +{ +public: + ClTunedParameters(armnn::IClTunedParameters::Mode mode); + + virtual void Load(const char* filename); + virtual void Save(const char* filename) const; + + Mode m_Mode; + +#ifdef ARMCOMPUTECL_ENABLED + arm_compute::CLTuner m_Tuner; +#endif +}; + +} // namespace armnn diff --git a/src/armnn/backends/ClWorkloadUtils.hpp b/src/armnn/backends/ClWorkloadUtils.hpp new file mode 100644 index 0000000000..549a0bbc25 --- /dev/null +++ b/src/armnn/backends/ClWorkloadUtils.hpp @@ -0,0 +1,39 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Workload.hpp" +#include <arm_compute/core/CL/OpenCL.h> +#include <arm_compute/runtime/CL/CLFunctions.h> +#include <arm_compute/runtime/SubTensor.h> +#include "ArmComputeTensorUtils.hpp" + +namespace armnn +{ + +template <typename T> +void CopyArmComputeClTensorData(const T* srcData, arm_compute::CLTensor& dstTensor) +{ + { + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "MapClTensorForWriting"); + dstTensor.map(true); + } + + { + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "CopyToClTensor"); + armcomputetensorutils::CopyArmComputeITensorData<T>(srcData, dstTensor); + } + + dstTensor.unmap(); +} + +template <typename T> +void InitialiseArmComputeClTensorData(arm_compute::CLTensor& clTensor, const T* data) +{ + armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor); + CopyArmComputeClTensorData<T>(data, clTensor); +} + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads.hpp b/src/armnn/backends/ClWorkloads.hpp new file mode 100644 index 0000000000..3b8cf50ace --- /dev/null +++ b/src/armnn/backends/ClWorkloads.hpp @@ -0,0 +1,35 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once +#include "backends/ClWorkloads/ClActivationFloat32Workload.hpp" +#include "backends/ClWorkloads/ClActivationUint8Workload.hpp" +#include "backends/ClWorkloads/ClAdditionFloat32Workload.hpp" +#include "backends/ClWorkloads/ClBaseConstantWorkload.hpp" +#include "backends/ClWorkloads/ClBaseMergerWorkload.hpp" +#include "backends/ClWorkloads/ClBatchNormalizationFloat32Workload.hpp" +#include "backends/ClWorkloads/ClConstantFloat32Workload.hpp" +#include "backends/ClWorkloads/ClConstantUint8Workload.hpp" +#include "backends/ClWorkloads/ClConvolution2dFloat32Workload.hpp" +#include "backends/ClWorkloads/ClConvolution2dUint8Workload.hpp" +#include "backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.hpp" +#include "backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp" +#include "backends/ClWorkloads/ClFloorFloat32Workload.hpp" +#include "backends/ClWorkloads/ClFullyConnectedFloat32Workload.hpp" +#include "backends/ClWorkloads/ClL2NormalizationFloat32Workload.hpp" +#include "backends/ClWorkloads/ClMergerFloat32Workload.hpp" +#include "backends/ClWorkloads/ClMergerUint8Workload.hpp" +#include "backends/ClWorkloads/ClMultiplicationFloat32Workload.hpp" +#include "backends/ClWorkloads/ClNormalizationFloat32Workload.hpp" +#include "backends/ClWorkloads/ClPermuteWorkload.hpp" +#include "backends/ClWorkloads/ClPooling2dFloat32Workload.hpp" +#include "backends/ClWorkloads/ClPooling2dUint8Workload.hpp" +#include "backends/ClWorkloads/ClReshapeFloat32Workload.hpp" +#include "backends/ClWorkloads/ClReshapeUint8Workload.hpp" +#include "backends/ClWorkloads/ClResizeBilinearFloat32Workload.hpp" +#include "backends/ClWorkloads/ClSoftmaxFloat32Workload.hpp" +#include "backends/ClWorkloads/ClSoftmaxUint8Workload.hpp" +#include "backends/ClWorkloads/ClSplitterFloat32Workload.hpp" +#include "backends/ClWorkloads/ClSplitterUint8Workload.hpp"
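
The Create* methods of ClWorkloadFactory all funnel through MakeWorkload<Float32WorkloadType, Uint8WorkloadType>, which selects one of the two concrete workload classes based on the data type flowing through the layer. The real helper lives in MakeWorkloadHelper.hpp (not shown in this section); the following is only a simplified, self-contained sketch of that dispatch pattern, with stand-in definitions for DataType, WorkloadInfo and IWorkload.

#include <memory>
#include <stdexcept>

// Stand-in types; the real definitions live in the armnn headers.
enum class DataType { Float32, QuantisedAsymm8 };
struct WorkloadInfo { DataType m_InputDataType; };
struct IWorkload { virtual ~IWorkload() = default; virtual void Execute() const = 0; };

// Sketch of the factory dispatch: pick the Float32 or Uint8 workload class
// depending on the tensor data type, forwarding the descriptor and info.
template <typename Float32Workload, typename Uint8Workload, typename Descriptor>
std::unique_ptr<IWorkload> MakeWorkloadSketch(const Descriptor& descriptor, const WorkloadInfo& info)
{
    switch (info.m_InputDataType)
    {
        case DataType::Float32:         return std::make_unique<Float32Workload>(descriptor, info);
        case DataType::QuantisedAsymm8: return std::make_unique<Uint8Workload>(descriptor, info);
        default: throw std::invalid_argument("Unsupported data type");
    }
}
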
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.cpp new file mode 100644 index 0000000000..fb5d78425e --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.cpp @@ -0,0 +1,33 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClActivationFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/ArmComputeUtils.hpp" + +namespace armnn +{ + +ClActivationFloat32Workload::ClActivationFloat32Workload(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<ActivationQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClActivationFloat32Workload", 1, 1); + + const arm_compute::ActivationLayerInfo activationLayerInfo = + ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters); + + arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + m_ActivationLayer.configure(&input, &output, activationLayerInfo); +} + +void ClActivationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClActivationFloat32Workload_Execute"); + m_ActivationLayer.run(); +} + +} //namespace armnn
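
ClActivationFloat32Workload converts the armnn ActivationDescriptor into an arm_compute::ActivationLayerInfo at construction time and only calls run() in Execute(). For reference, here is a self-contained sketch of what a few of these activation functions compute on a single float, with a and b playing the roles of ActivationDescriptor::m_A and m_B (upper/lower bound for BoundedReLu, slope for LeakyReLu).

#include <algorithm>
#include <cmath>

// Reference semantics (sketch) for some of the activation functions mapped
// onto arm_compute::ActivationLayerInfo by the conversion helpers above.
inline float ReLu(float x)                          { return std::max(0.0f, x); }
inline float BoundedReLu(float x, float a, float b) { return std::min(a, std::max(b, x)); }
inline float LeakyReLu(float x, float a)            { return x > 0.0f ? x : a * x; }
inline float Sigmoid(float x)                       { return 1.0f / (1.0f + std::exp(-x)); }
inline float SoftReLu(float x)                      { return std::log(1.0f + std::exp(x)); }
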
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.hpp new file mode 100644 index 0000000000..9bab4202be --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClActivationFloat32Workload.hpp @@ -0,0 +1,24 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +// Activation layer execution +class ClActivationFloat32Workload : public Float32Workload<ActivationQueueDescriptor> +{ +public: + ClActivationFloat32Workload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + mutable arm_compute::CLActivationLayer m_ActivationLayer; +}; + +} //namespace armnn
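
Every Execute() in these workloads opens with ARMNN_SCOPED_PROFILING_EVENT, a scope-based profiling marker: the event starts when the object is constructed and ends when the enclosing scope is left. A generic, self-contained sketch of that RAII idea follows; it is not the actual armnn profiling implementation, just an illustration of the pattern.

#include <chrono>
#include <cstdio>
#include <string>

// RAII scoped timer: starts timing on construction, reports on destruction.
class ScopedProfilingEventSketch
{
public:
    explicit ScopedProfilingEventSketch(std::string name)
        : m_Name(std::move(name)), m_Start(std::chrono::steady_clock::now()) {}

    ~ScopedProfilingEventSketch()
    {
        const auto elapsed = std::chrono::steady_clock::now() - m_Start;
        const auto us = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
        std::printf("%s: %lld us\n", m_Name.c_str(), static_cast<long long>(us));
    }

private:
    std::string m_Name;
    std::chrono::steady_clock::time_point m_Start;
};
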
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp new file mode 100644 index 0000000000..3671dd7187 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp @@ -0,0 +1,47 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClActivationUint8Workload.hpp" +#include "backends/ClLayerSupport.hpp" + +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" +namespace armnn +{ + +ClActivationUint8Workload::ClActivationUint8Workload(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Uint8Workload<ActivationQueueDescriptor>(descriptor, info) +{ + + std::string reasonIfUnsupported; + if (!IsClActivationUint8Supported(&reasonIfUnsupported, m_Data.m_Parameters)) + { + throw InvalidArgumentException(reasonIfUnsupported); + } + + // Only BoundedReLu is supported (see IsClActivationUint8Supported) + arm_compute::ActivationLayerInfo layerInfo(arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, + m_Data.m_Parameters.m_A, + m_Data.m_Parameters.m_B); + + m_Data.ValidateInputsOutputs("ClActivationUint8Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_ActivationLayer.configure(&input, &output, layerInfo); +} + +void ClActivationUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClActivationUint8Workload_Execute"); + + m_ActivationLayer.run(); +} + +} //namespace armnn + + diff --git a/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.hpp new file mode 100644 index 0000000000..3a9cceb298 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClActivationUint8Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +// Activation layer execution +class ClActivationUint8Workload : public Uint8Workload<ActivationQueueDescriptor> +{ +public: + ClActivationUint8Workload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + mutable arm_compute::CLActivationLayer m_ActivationLayer; +}; + +} //namespace armnn + + + diff --git a/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.cpp new file mode 100644 index 0000000000..153167f172 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.cpp @@ -0,0 +1,57 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "ClAdditionFloat32Workload.hpp" + +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +ClAdditionFloat32Workload::ClAdditionFloat32Workload(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<AdditionQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClAdditionFloat32Workload", 2, 1); + + arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + m_Layer.configure(&input0, &input1, &output, ms_AclConvertPolicy); +} + +void ClAdditionFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClAdditionFloat32Workload_Execute"); + m_Layer.run(); +} + +bool ClAdditionFloat32Workload::IsSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0); + const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1); + const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output); + + const arm_compute::Status aclStatus = decltype(m_Layer)::validate(&aclInput0Info, + &aclInput1Info, + &aclOutputInfo, + ms_AclConvertPolicy); + + const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK); + if (!supported && reasonIfUnsupported) + { + *reasonIfUnsupported = aclStatus.error_description(); + } + + return supported; +} + +} //namespace armnn
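
ClAdditionFloat32Workload::IsSupported shows the recurring pattern for surfacing ACL validation results to armnn: call the layer's static validate(), then translate the returned arm_compute::Status into a bool plus an optional reason string (the same pattern appears in the IsWorkloadSupported helper in ClLayerSupport.cpp). Below is a simplified, stand-alone sketch of that translation, using a stand-in Status type rather than arm_compute::Status.

#include <string>

// Stand-in for arm_compute::Status: an error code plus a description.
struct Status
{
    enum class ErrorCode { OK, RuntimeError } code = ErrorCode::OK;
    std::string description;
};

// Translate a validation status into the bool/reason convention used by the
// IsXxxSupportedCl functions: true on success, otherwise false with the reason
// copied into *reasonIfUnsupported when the caller asked for it.
inline bool ToSupportedFlag(const Status& status, std::string* reasonIfUnsupported)
{
    const bool supported = (status.code == Status::ErrorCode::OK);
    if (!supported && reasonIfUnsupported != nullptr)
    {
        *reasonIfUnsupported = status.description;
    }
    return supported;
}
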
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.hpp new file mode 100644 index 0000000000..37e50c2c86 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClAdditionFloat32Workload.hpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +class ClAdditionFloat32Workload : public Float32Workload<AdditionQueueDescriptor> +{ +public: + ClAdditionFloat32Workload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info); + + void Execute() const override; + + static bool IsSupported(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported); + +private: + mutable arm_compute::CLArithmeticAddition m_Layer; + static constexpr arm_compute::ConvertPolicy ms_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE; +}; + +} //namespace armnn
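
The addition workload fixes ms_AclConvertPolicy to arm_compute::ConvertPolicy::SATURATE, meaning results outside the representable range are clamped rather than wrapped. This has no visible effect for the Float32 workload here, but it matters for integer and quantized tensor types; a minimal illustration of the difference for uint8 values:

#include <algorithm>
#include <cstdint>

// Wrapping addition: 200 + 100 == 44, because the result is reduced modulo 256.
inline uint8_t AddWrap(uint8_t a, uint8_t b)
{
    return static_cast<uint8_t>(a + b);
}

// Saturating addition (the behaviour selected by ConvertPolicy::SATURATE):
// 200 + 100 == 255, the result is clamped to the representable range.
inline uint8_t AddSaturate(uint8_t a, uint8_t b)
{
    const int sum = static_cast<int>(a) + static_cast<int>(b);
    return static_cast<uint8_t>(std::min(sum, 255));
}
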
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp b/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp new file mode 100644 index 0000000000..4b72d92d72 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClBaseConstantWorkload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" + +namespace armnn +{ + +template class ClBaseConstantWorkload<DataType::Float32>; +template class ClBaseConstantWorkload<DataType::QuantisedAsymm8>; + +template<armnn::DataType dataType> +void ClBaseConstantWorkload<dataType>::Execute() const +{ + // The intermediate tensor held by the corresponding layer output handler can be initialised with the given data + // on the first inference, then reused for subsequent inferences. + // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer may not + // have been configured at the time. + if (!m_RanOnce) + { + const ConstantQueueDescriptor& data = this->m_Data; + + BOOST_ASSERT(data.m_LayerOutput != nullptr); + arm_compute::CLTensor& output = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetTensor(); + + switch (dataType) + { + case DataType::Float32: + { + CopyArmComputeClTensorData(data.m_LayerOutput->GetConstTensor<float>(), output); + break; + } + case DataType::QuantisedAsymm8: + { + CopyArmComputeClTensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output); + break; + } + default: + { + BOOST_ASSERT_MSG(false, "Unknown data type"); + break; + } + } + + m_RanOnce = true; + } +} + + +} //namespace armnn
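
ClBaseConstantWorkload defers copying the constant data into its output tensor until the first Execute() call, because the consuming ACL kernel may not have been configured yet at workload construction time; a mutable flag makes the copy happen exactly once across a const Execute(). A stripped-down, self-contained sketch of that pattern (ConstantWorkloadSketch is a stand-in name):

#include <vector>

// Lazy one-shot initialisation: Execute() is const, so the "already initialised"
// flag is mutable and flipped on the first run only.
class ConstantWorkloadSketch
{
public:
    explicit ConstantWorkloadSketch(std::vector<float> constantData)
        : m_Data(std::move(constantData)) {}

    void Execute(std::vector<float>& outputTensor) const
    {
        if (!m_RanOnce)
        {
            outputTensor = m_Data; // one-time upload of the constant payload
            m_RanOnce = true;
        }
        // Subsequent executions are no-ops: the output already holds the data.
    }

private:
    std::vector<float> m_Data;
    mutable bool m_RanOnce = false;
};
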
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.hpp b/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.hpp new file mode 100644 index 0000000000..660842f375 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.hpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ +template <armnn::DataType DataType> +class ClBaseConstantWorkload : public TypedWorkload<ConstantQueueDescriptor, DataType> +{ +public: + ClBaseConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info) + : TypedWorkload<ConstantQueueDescriptor, DataType>(descriptor, info) + , m_RanOnce(false) + { + } + + void Execute() const override; + +private: + mutable bool m_RanOnce; +}; + +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClBaseMergerWorkload.hpp b/src/armnn/backends/ClWorkloads/ClBaseMergerWorkload.hpp new file mode 100644 index 0000000000..7542c62b47 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClBaseMergerWorkload.hpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +// Base class template providing an implementation of the Merger layer common to all data types +template <armnn::DataType DataType> +class ClBaseMergerWorkload : public TypedWorkload<MergerQueueDescriptor, DataType> +{ +public: + using TypedWorkload<MergerQueueDescriptor, DataType>::TypedWorkload; + + void Execute() const override + { + // With subtensors, merger is a no-op + } +}; + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClBaseSplitterWorkload.hpp b/src/armnn/backends/ClWorkloads/ClBaseSplitterWorkload.hpp new file mode 100644 index 0000000000..fef841ced2 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClBaseSplitterWorkload.hpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +// Base class template providing an implementation of the Splitter layer common to all data types +template <armnn::DataType DataType> +class ClBaseSplitterWorkload : public TypedWorkload<SplitterQueueDescriptor, DataType> +{ +public: + using TypedWorkload<SplitterQueueDescriptor, DataType>::TypedWorkload; + + void Execute() const override + { + // With subtensors, splitter is a no-op + } +}; + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.cpp new file mode 100644 index 0000000000..dabd495d59 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.cpp @@ -0,0 +1,42 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "ClBatchNormalizationFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +ClBatchNormalizationFloat32Workload::ClBatchNormalizationFloat32Workload( + const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) + : Float32Workload<BatchNormalizationQueueDescriptor>(descriptor, info) +{ + BuildArmComputeTensor(m_Mean, m_Data.m_Mean->GetTensorInfo()); + BuildArmComputeTensor(m_Variance, m_Data.m_Variance->GetTensorInfo()); + BuildArmComputeTensor(m_Gamma, m_Data.m_Gamma->GetTensorInfo()); + BuildArmComputeTensor(m_Beta, m_Data.m_Beta->GetTensorInfo()); + + m_Data.ValidateInputsOutputs("ClBatchNormalizationFloat32Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + m_Layer.configure(&input, &output, &m_Mean, &m_Variance, &m_Beta, &m_Gamma, m_Data.m_Parameters.m_Eps); + + InitialiseArmComputeClTensorData(m_Mean, m_Data.m_Mean->GetConstTensor<float>()); + InitialiseArmComputeClTensorData(m_Variance, m_Data.m_Variance->GetConstTensor<float>()); + InitialiseArmComputeClTensorData(m_Beta, m_Data.m_Beta->GetConstTensor<float>()); + InitialiseArmComputeClTensorData(m_Gamma, m_Data.m_Gamma->GetConstTensor<float>()); +} + +void ClBatchNormalizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClBatchNormalizationFloat32Workload_Execute"); + m_Layer.run(); +} + +} //namespace armnn
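
For reference, the inference-time batch normalization the CL layer is configured for combines the per-channel mean, variance, beta and gamma tensors with the epsilon from the descriptor in the standard form out = gamma * (in - mean) / sqrt(variance + eps) + beta. A scalar sketch of that per-element computation:

#include <cmath>

// Reference (per-element) form of inference-time batch normalization, applied
// with the per-channel parameters that the workload uploads as CL tensors.
inline float BatchNormalize(float input, float mean, float variance,
                            float beta, float gamma, float epsilon)
{
    return gamma * (input - mean) / std::sqrt(variance + epsilon) + beta;
}
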
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.hpp new file mode 100644 index 0000000000..ddbd0f05c0 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClBatchNormalizationFloat32Workload.hpp @@ -0,0 +1,34 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +class ClBatchNormalizationFloat32Workload : public Float32Workload<BatchNormalizationQueueDescriptor> +{ +public: + ClBatchNormalizationFloat32Workload(const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info); + + using Float32Workload<BatchNormalizationQueueDescriptor>::Float32Workload; + void Execute() const override; + +private: + mutable arm_compute::CLBatchNormalizationLayer m_Layer; + + arm_compute::CLTensor m_Mean; + arm_compute::CLTensor m_Variance; + arm_compute::CLTensor m_Gamma; + arm_compute::CLTensor m_Beta; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.cpp new file mode 100644 index 0000000000..99880d68a7 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.cpp @@ -0,0 +1,16 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClConstantFloat32Workload.hpp" +namespace armnn +{ + +void ClConstantFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClConstantFloat32Workload_Execute"); + ClBaseConstantWorkload::Execute(); +} + +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.hpp new file mode 100644 index 0000000000..5f86d3b2b6 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClConstantFloat32Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "ClBaseConstantWorkload.hpp" + +namespace armnn +{ +class ClConstantFloat32Workload : public ClBaseConstantWorkload<DataType::Float32> +{ +public: + using ClBaseConstantWorkload<DataType::Float32>::ClBaseConstantWorkload; + void Execute() const override; +}; + + +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.cpp new file mode 100644 index 0000000000..078d4261fa --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.cpp @@ -0,0 +1,16 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClConstantUint8Workload.hpp" +namespace armnn +{ + +void ClConstantUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClConstantUint8Workload_Execute"); + ClBaseConstantWorkload::Execute(); +} + +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.hpp new file mode 100644 index 0000000000..3a53f1011e --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClConstantUint8Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "ClBaseConstantWorkload.hpp" + +namespace armnn +{ + +class ClConstantUint8Workload : public ClBaseConstantWorkload<DataType::QuantisedAsymm8> +{ +public: + using ClBaseConstantWorkload<DataType::QuantisedAsymm8>::ClBaseConstantWorkload; + void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.cpp new file mode 100644 index 0000000000..6f4069bcc0 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.cpp @@ -0,0 +1,70 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClConvolution2dFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" +#include "backends/ClLayerSupport.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +ClConvolution2dFloat32Workload::ClConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<Convolution2dQueueDescriptor>(descriptor, info) +{ + + // todo: check tensor shapes match + const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo(); + BuildArmComputeTensor(m_KernelTensor, weightInfo); + + arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX, + m_Data.m_Parameters.m_StrideY, + m_Data.m_Parameters.m_PadLeft, + m_Data.m_Parameters.m_PadRight, + m_Data.m_Parameters.m_PadTop, + m_Data.m_Parameters.m_PadBottom, + arm_compute::DimensionRoundingType::FLOOR); + + arm_compute::CLTensor* optionalBias = nullptr; + if (m_Data.m_Parameters.m_BiasEnabled) + { + BuildArmComputeTensor(m_BiasTensor, m_Data.m_Bias->GetTensorInfo()); + optionalBias = &m_BiasTensor; + } + + m_Data.ValidateInputsOutputs("ClConvolution2dFloat32Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_pConvolutionLayer = std::make_unique<arm_compute::CLConvolutionLayer>(); + static_cast<arm_compute::CLConvolutionLayer*>(m_pConvolutionLayer.get())->configure(&input, + &m_KernelTensor, + optionalBias, + &output, + padStrideInfo); + + BOOST_ASSERT(m_pConvolutionLayer); + + InitialiseArmComputeClTensorData(m_KernelTensor, m_Data.m_Weight->GetConstTensor<float>()); + + if (optionalBias) + { + InitialiseArmComputeClTensorData(*optionalBias, m_Data.m_Bias->GetConstTensor<float>()); + } +} + +void ClConvolution2dFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClConvolution2dFloat32Workload_Execute"); + BOOST_ASSERT(m_pConvolutionLayer); + + m_pConvolutionLayer->run(); +} + +} //namespace armnn
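The PadStrideInfo built in the convolution constructor above uses arm_compute::DimensionRoundingType::FLOOR, which corresponds to the usual floor-based convolution output-size arithmetic. A small reference helper, included only to document that relationship (the ConvOutputSize name is illustrative, not part of this change):

// Output extent of one spatial dimension under FLOOR rounding, matching the
// stride and padding fields passed to PadStrideInfo above.
inline unsigned int ConvOutputSize(unsigned int inputSize, unsigned int kernelSize,
                                   unsigned int stride, unsigned int padLeft, unsigned int padRight)
{
    // Unsigned integer division already floors the quotient.
    return (inputSize + padLeft + padRight - kernelSize) / stride + 1;
}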
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.hpp new file mode 100644 index 0000000000..29931056a8 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClConvolution2dFloat32Workload.hpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ +class ClConvolution2dFloat32Workload : public Float32Workload<Convolution2dQueueDescriptor> +{ +public: + ClConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + mutable std::unique_ptr<arm_compute::IFunction> m_pConvolutionLayer; + + arm_compute::CLTensor m_KernelTensor; + arm_compute::CLTensor m_BiasTensor; +}; + +} //namespace armnn + diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp new file mode 100644 index 0000000000..a3c6ac9dca --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.cpp @@ -0,0 +1,72 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClConvolution2dUint8Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" +#include "backends/ClLayerSupport.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +ClConvolution2dUint8Workload::ClConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Uint8Workload<Convolution2dQueueDescriptor>(descriptor, info) +{ + + // todo: check tensor shapes match + const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo(); + BuildArmComputeTensor(m_KernelTensor, weightInfo); + + arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX, + m_Data.m_Parameters.m_StrideY, + m_Data.m_Parameters.m_PadLeft, + m_Data.m_Parameters.m_PadRight, + m_Data.m_Parameters.m_PadTop, + m_Data.m_Parameters.m_PadBottom, + arm_compute::DimensionRoundingType::FLOOR); + + arm_compute::CLTensor* optionalBias = nullptr; + if (m_Data.m_Parameters.m_BiasEnabled) + { + BuildArmComputeTensor(m_BiasTensor, m_Data.m_Bias->GetTensorInfo()); + optionalBias = &m_BiasTensor; + } + + m_Data.ValidateInputsOutputs("ClConvolution2dUint8Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + BOOST_ASSERT_MSG(IsClDirectConvolution2dSupported(weightInfo, m_Data.m_Parameters), + "Unsupported parameters for u8 convolution"); + + m_pConvolutionLayer = std::make_unique<arm_compute::CLDirectConvolutionLayer>(); + static_cast<arm_compute::CLDirectConvolutionLayer*>(m_pConvolutionLayer.get())->configure(&input, + &m_KernelTensor, + optionalBias, + &output, + padStrideInfo); + BOOST_ASSERT(m_pConvolutionLayer); + + InitialiseArmComputeClTensorData(m_KernelTensor, m_Data.m_Weight->GetConstTensor<uint8_t>()); + + if (optionalBias) + { + InitialiseArmComputeClTensorData(*optionalBias, m_Data.m_Bias->GetConstTensor<int32_t>()); + } +} + +void ClConvolution2dUint8Workload::Execute() const +{ + 
ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClConvolution2dUint8Workload_Execute"); + BOOST_ASSERT(m_pConvolutionLayer); + + m_pConvolutionLayer->run(); +} + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.hpp new file mode 100644 index 0000000000..b2849d773b --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClConvolution2dUint8Workload.hpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + + +namespace armnn +{ + +class ClConvolution2dUint8Workload : public Uint8Workload<Convolution2dQueueDescriptor> +{ +public: + ClConvolution2dUint8Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + mutable std::unique_ptr<arm_compute::IFunction> m_pConvolutionLayer; + + arm_compute::CLTensor m_KernelTensor; + arm_compute::CLTensor m_BiasTensor; +}; + +} //namespace armnn + diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.cpp new file mode 100644 index 0000000000..f31c73bc60 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.cpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClDepthwiseConvolutionFloat32Workload.hpp" +#include "ClDepthwiseConvolutionHelper.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" + +namespace armnn +{ + +ClDepthwiseConvolutionFloat32Workload::ClDepthwiseConvolutionFloat32Workload( + const DepthwiseConvolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info) +{ + InitClDepthwiseConvolutionWorkload(*this); +} + +void ClDepthwiseConvolutionFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClDepthwiseConvolutionFloat32Workload_Execute"); + BOOST_ASSERT(m_pDepthwiseConvolutionLayer); + + m_pDepthwiseConvolutionLayer->run(); +} + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.hpp new file mode 100644 index 0000000000..8711f0c515 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionFloat32Workload.hpp @@ -0,0 +1,37 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +class ClDepthwiseConvolutionFloat32Workload : public Float32Workload<DepthwiseConvolution2dQueueDescriptor> +{ +public: + ClDepthwiseConvolutionFloat32Workload(const DepthwiseConvolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info); + void Execute() const override; + +private: + typedef float KernelDataType; + typedef float BiasDataType; + + mutable std::unique_ptr<arm_compute::IFunction> m_pDepthwiseConvolutionLayer; + + arm_compute::CLTensor m_KernelTensor; + arm_compute::CLTensor m_BiasTensor; + + template <typename WorkloadType> + friend void InitClDepthwiseConvolutionWorkload(WorkloadType& workload); +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionHelper.hpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionHelper.hpp new file mode 100644 index 0000000000..cd7115773d --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionHelper.hpp @@ -0,0 +1,91 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <armnn/TypesUtils.hpp> +#include "backends/ClLayerSupport.hpp" +#include "backends/ArmComputeTensorUtils.hpp" +#include "backends/ClTensorHandle.hpp" + +namespace armnn +{ + +template <typename WorkloadType> +void InitClDepthwiseConvolutionWorkload(WorkloadType& workload) +{ + using T = typename WorkloadType::KernelDataType; + using B = typename WorkloadType::BiasDataType; + + auto& m_Data = workload.GetData(); + auto& m_KernelTensor = workload.m_KernelTensor; + auto& m_BiasTensor = workload.m_BiasTensor; + auto& m_pDepthwiseConvolutionLayer = workload.m_pDepthwiseConvolutionLayer; + + auto& weightInfo = m_Data.m_Weight->GetTensorInfo(); + + std::string reasonIfUnsupported; + if (!IsClDepthwiseConvolution2dDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters, weightInfo)) + { + throw UnimplementedException(reasonIfUnsupported); + } + + armcomputetensorutils::BuildArmComputeTensor(m_KernelTensor, weightInfo); + + arm_compute::CLTensor* optionalBias = nullptr; + if (m_Data.m_Parameters.m_BiasEnabled) + { + armcomputetensorutils::BuildArmComputeTensor(m_BiasTensor, m_Data.m_Bias->GetTensorInfo()); + optionalBias = &m_BiasTensor; + } + + arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX, + m_Data.m_Parameters.m_StrideY, + m_Data.m_Parameters.m_PadLeft, + m_Data.m_Parameters.m_PadRight, + m_Data.m_Parameters.m_PadTop, + m_Data.m_Parameters.m_PadBottom, + arm_compute::DimensionRoundingType::FLOOR); + + std::string name = std::string("ClDepthwiseConvolution") + GetDataTypeName(GetDataType<T>()) + "Workload"; + m_Data.ValidateInputsOutputs(name, 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + //Check for optimisation opportunities. 
+ bool use3x3Optimisation = (weightInfo.GetShape()[3] == 3) && (weightInfo.GetShape()[2] == 3); + if (use3x3Optimisation) + { + m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer3x3>(); + static_cast<arm_compute::CLDepthwiseConvolutionLayer3x3*>(m_pDepthwiseConvolutionLayer.get())->configure( + &input, + &m_KernelTensor, + optionalBias, + &output, + padStrideInfo); + } + else + { + m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer>(); + static_cast<arm_compute::CLDepthwiseConvolutionLayer*>(m_pDepthwiseConvolutionLayer.get())->configure( + &input, + &m_KernelTensor, + optionalBias, + &output, + padStrideInfo); + } + + BOOST_ASSERT(m_pDepthwiseConvolutionLayer); + + InitialiseArmComputeClTensorData(m_KernelTensor, m_Data.m_Weight->template GetConstTensor<T>()); + + if (optionalBias) + { + InitialiseArmComputeClTensorData(*optionalBias, m_Data.m_Bias->template GetConstTensor<B>()); + } +} + +} //namespace armnn
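The "optimisation opportunities" check above selects CLDepthwiseConvolutionLayer3x3 when dimensions 2 and 3 of the weight tensor are both 3; those indices are assumed to be the kernel height and width. An equivalent stand-alone sketch of that predicate (the helper name is illustrative only, not part of this change):

#include <armnn/Tensor.hpp>

// True when the depthwise weights describe a 3x3 kernel, i.e. when the ACL
// 3x3 fast path chosen by InitClDepthwiseConvolutionWorkload applies.
inline bool UsesDepthwise3x3FastPath(const armnn::TensorInfo& weightInfo)
{
    const armnn::TensorShape& shape = weightInfo.GetShape();
    return shape[2] == 3 && shape[3] == 3;
}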
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp new file mode 100644 index 0000000000..7e7c488c74 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp @@ -0,0 +1,32 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClDepthwiseConvolutionUint8Workload.hpp" +#include "ClDepthwiseConvolutionHelper.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" + +namespace armnn +{ + + +ClDepthwiseConvolutionUint8Workload::ClDepthwiseConvolutionUint8Workload( + const DepthwiseConvolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Uint8Workload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info) +{ + InitClDepthwiseConvolutionWorkload(*this); +} + +void ClDepthwiseConvolutionUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClDepthwiseConvolutionUint8Workload_Execute"); + BOOST_ASSERT(m_pDepthwiseConvolutionLayer); + + m_pDepthwiseConvolutionLayer->run(); +} + +} //namespace armnn + diff --git a/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp new file mode 100644 index 0000000000..ee09ff3e58 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp @@ -0,0 +1,35 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +class ClDepthwiseConvolutionUint8Workload : public Uint8Workload<DepthwiseConvolution2dQueueDescriptor> +{ +public: + ClDepthwiseConvolutionUint8Workload(const DepthwiseConvolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info); + void Execute() const override; + +private: + typedef uint8_t KernelDataType; + typedef int32_t BiasDataType; + + mutable std::unique_ptr<arm_compute::IFunction> m_pDepthwiseConvolutionLayer; + + arm_compute::CLTensor m_KernelTensor; + arm_compute::CLTensor m_BiasTensor; + + template <typename WorkloadType> + friend void InitClDepthwiseConvolutionWorkload(WorkloadType& workload); +}; + +} //namespace armnn + + diff --git a/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.cpp new file mode 100644 index 0000000000..882da50855 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.cpp @@ -0,0 +1,29 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "ClFloorFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" + +namespace armnn +{ + +ClFloorFloat32Workload::ClFloorFloat32Workload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info) + : Float32Workload<FloorQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClFloorFloat32Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_Layer.configure(&input, &output); +} + +void ClFloorFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClFloorFloat32Workload_Execute"); + m_Layer.run(); +} + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.hpp new file mode 100644 index 0000000000..532dd29884 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClFloorFloat32Workload.hpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +class ClFloorFloat32Workload : public Float32Workload<FloorQueueDescriptor> +{ +public: + ClFloorFloat32Workload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info); + + void Execute() const override; + +private: + mutable arm_compute::CLFloor m_Layer; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.cpp new file mode 100644 index 0000000000..96596b9d9c --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.cpp @@ -0,0 +1,52 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "ClFullyConnectedFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +ClFullyConnectedFloat32Workload::ClFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<FullyConnectedQueueDescriptor>(descriptor, info) +{ + + BuildArmComputeTensor(m_WeightsTensor, m_Data.m_Weight->GetTensorInfo()); + + arm_compute::CLTensor* optionalBiasTensor = nullptr; + if (m_Data.m_Parameters.m_BiasEnabled) + { + BuildArmComputeTensor(m_BiasesTensor, m_Data.m_Bias->GetTensorInfo()); + optionalBiasTensor = &m_BiasesTensor; + } + + m_Data.ValidateInputsOutputs("ClFullyConnectedFloat32Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + // Construct + m_FullyConnected.configure( + &input, &m_WeightsTensor, optionalBiasTensor, &output, m_Data.m_Parameters.m_TransposeWeightMatrix); + + // Allocate + InitialiseArmComputeClTensorData(m_WeightsTensor, m_Data.m_Weight->GetConstTensor<float>()); + + if (optionalBiasTensor) + { + InitialiseArmComputeClTensorData(*optionalBiasTensor, m_Data.m_Bias->GetConstTensor<float>()); + } +} + +void ClFullyConnectedFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClFullyConnectedFloat32Workload_Execute"); + m_FullyConnected.run(); +} + +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.hpp new file mode 100644 index 0000000000..def20e0831 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClFullyConnectedFloat32Workload.hpp @@ -0,0 +1,29 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + + +namespace armnn +{ + +class ClFullyConnectedFloat32Workload : public armnn::Float32Workload<armnn::FullyConnectedQueueDescriptor> +{ +public: + ClFullyConnectedFloat32Workload(const armnn::FullyConnectedQueueDescriptor& descriptor, + const armnn::WorkloadInfo& info); + + using armnn::Float32Workload<armnn::FullyConnectedQueueDescriptor>::m_Data; + void Execute() const override; + +private: + mutable arm_compute::CLFullyConnectedLayer m_FullyConnected; + arm_compute::CLTensor m_WeightsTensor; + arm_compute::CLTensor m_BiasesTensor; +}; + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.cpp new file mode 100644 index 0000000000..e15db74ec9 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.cpp @@ -0,0 +1,35 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClL2NormalizationFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeUtils.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +ClL2NormalizationFloat32Workload::ClL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<L2NormalizationQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClL2NormalizationFloat32Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + m_Layer.configure(&input, &output, CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0])); +} + +void ClL2NormalizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClL2NormalizationFloat32Workload_Execute"); + m_Layer.run(); +} + +} //namespace armnn + + + diff --git a/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.hpp new file mode 100644 index 0000000000..848803e2f0 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClL2NormalizationFloat32Workload.hpp @@ -0,0 +1,29 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +class ClL2NormalizationFloat32Workload : public Float32Workload<L2NormalizationQueueDescriptor> +{ +public: + ClL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info); + + void Execute() const override; + +private: + // Purposely not a CLL2Normalize function. See constructor. 
+ mutable arm_compute::CLNormalizationLayer m_Layer; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.cpp new file mode 100644 index 0000000000..4d2d708a0e --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.cpp @@ -0,0 +1,19 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClMergerFloat32Workload.hpp" + + +namespace armnn +{ + +void ClMergerFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClMergerFloat32Workload_Execute"); + ClBaseMergerWorkload::Execute(); +} + +} //namespace armnn + diff --git a/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.hpp new file mode 100644 index 0000000000..9808d30ccf --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClMergerFloat32Workload.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "ClBaseMergerWorkload.hpp" + +namespace armnn +{ + +class ClMergerFloat32Workload : public ClBaseMergerWorkload<armnn::DataType::Float32> +{ +public: + using ClBaseMergerWorkload<armnn::DataType::Float32>::ClBaseMergerWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn + + diff --git a/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.cpp new file mode 100644 index 0000000000..94a1d3c593 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.cpp @@ -0,0 +1,18 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClMergerUint8Workload.hpp" + + +namespace armnn +{ + +void ClMergerUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClMergerUint8Workload_Execute"); + ClBaseMergerWorkload<DataType::QuantisedAsymm8>::Execute(); +} + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.hpp new file mode 100644 index 0000000000..1ddbb2ac52 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClMergerUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "ClBaseMergerWorkload.hpp" + +namespace armnn +{ + +class ClMergerUint8Workload : public ClBaseMergerWorkload<armnn::DataType::QuantisedAsymm8> +{ +public: + using ClBaseMergerWorkload<armnn::DataType::QuantisedAsymm8>::ClBaseMergerWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn + diff --git a/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.cpp new file mode 100644 index 0000000000..405d109aa1 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.cpp @@ -0,0 +1,39 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "ClMultiplicationFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" + +namespace armnn +{ + +ClMultiplicationFloat32Workload::ClMultiplicationFloat32Workload(const MultiplicationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<MultiplicationQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClMultiplicationFloat32Workload", 2, 1); + + arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + // Construct + m_PixelWiseMultiplication.configure(&input0, + &input1, + &output, + 1.0f, + arm_compute::ConvertPolicy::SATURATE, + arm_compute::RoundingPolicy::TO_NEAREST_EVEN); +} + +void ClMultiplicationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClMultiplicationFloat32Workload_Execute"); + + // Execute the layer + m_PixelWiseMultiplication.run(); +} + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.hpp new file mode 100644 index 0000000000..8e387118e8 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClMultiplicationFloat32Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ +class ClMultiplicationFloat32Workload : public Float32Workload<MultiplicationQueueDescriptor> +{ +public: + ClMultiplicationFloat32Workload(const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info); + + using Float32Workload<MultiplicationQueueDescriptor>::Float32Workload; + void Execute() const override; + +private: + mutable arm_compute::CLPixelWiseMultiplication m_PixelWiseMultiplication; +}; + +} //namespace armnn + + + diff --git a/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.cpp new file mode 100644 index 0000000000..a163ec2883 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.cpp @@ -0,0 +1,49 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "ClNormalizationFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ClLayerSupport.hpp" +#include "backends/ArmComputeUtils.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + +namespace armnn +{ + +arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo& input, const TensorInfo& output, + const NormalizationDescriptor& descriptor) +{ + const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input); + const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output); + + arm_compute::NormalizationLayerInfo layerInfo = + armcomputetensorutils::BuildArmComputeNormalizationLayerInfo(descriptor); + + return arm_compute::CLNormalizationLayer::validate(&aclInputInfo, &aclOutputInfo, layerInfo); +} + +ClNormalizationFloat32Workload::ClNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<NormalizationQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClNormalizationFloat32Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + arm_compute::NormalizationLayerInfo normalizationInfo = + armcomputetensorutils::BuildArmComputeNormalizationLayerInfo(m_Data.m_Parameters); + + m_NormalizationLayer.configure(&input, &output, normalizationInfo); +}; + +void ClNormalizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClNormalizationFloat32Workload_Execute"); + m_NormalizationLayer.run(); +} + +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.hpp new file mode 100644 index 0000000000..cbd5fa92a9 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClNormalizationFloat32Workload.hpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +arm_compute::Status ClNormalizationWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor); + +class ClNormalizationFloat32Workload : public Float32Workload<NormalizationQueueDescriptor> +{ +public: + ClNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + mutable arm_compute::CLNormalizationLayer m_NormalizationLayer; +}; + +} //namespace armnn + diff --git a/src/armnn/backends/ClWorkloads/ClPermuteWorkload.cpp b/src/armnn/backends/ClWorkloads/ClPermuteWorkload.cpp new file mode 100644 index 0000000000..3147e95b2e --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClPermuteWorkload.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClPermuteWorkload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + +#include <arm_compute/core/Error.h> + +namespace armnn +{ + +arm_compute::Status ClPermuteWorkloadValidate(const PermuteDescriptor& descriptor) +{ + const armnn::PermutationVector& perm = descriptor.m_DimMappings; + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(!perm.IsEqual({ 0U, 3U, 1U, 2U }) + && !perm.IsEqual({ 0U, 2U, 3U, 1U }) + && !perm.IsEqual({ 3U, 2U, 0U, 1U }), + "Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported"); + + return arm_compute::Status{}; +} + +template <armnn::DataType DataType> +ClPermuteWorkload<DataType>::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<PermuteQueueDescriptor, DataType>(descriptor, info) +{ + using armcomputetensorutils::BuildArmComputePermutationVector; + + m_Data.ValidateInputsOutputs(GetName(), 1, 1); + + const arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings; + + // Run the layer + m_PermuteFunction.configure(&input, &output, BuildArmComputePermutationVector(mappings)); +} + +template <armnn::DataType DataType> +void ClPermuteWorkload<DataType>::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, GetName() + "_Execute"); + m_PermuteFunction.run(); +} + +template class ClPermuteWorkload<DataType::Float32>; +template class ClPermuteWorkload<DataType::QuantisedAsymm8>; + +} // namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClPermuteWorkload.hpp b/src/armnn/backends/ClWorkloads/ClPermuteWorkload.hpp new file mode 100644 index 0000000000..430c59524e --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClPermuteWorkload.hpp @@ -0,0 +1,42 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +#include <armnn/TypesUtils.hpp> +#include <arm_compute/runtime/CL/functions/CLPermute.h> + +#include <string> + +namespace armnn +{ + +arm_compute::Status ClPermuteWorkloadValidate(const PermuteDescriptor& descriptor); + +template <armnn::DataType DataType> +class ClPermuteWorkload : public TypedWorkload<PermuteQueueDescriptor, DataType> +{ +public: + static const std::string& GetName() + { + static const std::string name = std::string("ClPermute") + GetDataTypeName(DataType) + "Workload"; + return name; + } + + ClPermuteWorkload(const PermuteQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + using TypedWorkload<PermuteQueueDescriptor, DataType>::m_Data; + mutable arm_compute::CLPermute m_PermuteFunction; +}; + +using ClPermuteFloat32Workload = ClPermuteWorkload<DataType::Float32>; +using ClPermuteUint8Workload = ClPermuteWorkload<DataType::QuantisedAsymm8>; + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.cpp b/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.cpp new file mode 100644 index 0000000000..dbdc06f174 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.cpp @@ -0,0 +1,47 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClPooling2dBaseWorkload.hpp" +#include "backends/ClLayerSupport.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/ArmComputeUtils.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor) +{ + const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input); + const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output); + + arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor); + + return arm_compute::CLPoolingLayer::validate(&aclInputInfo, &aclOutputInfo, layerInfo); +} + +template <armnn::DataType dataType> +ClPooling2dBaseWorkload<dataType>::ClPooling2dBaseWorkload( + const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info, const std::string& name) + : TypedWorkload<Pooling2dQueueDescriptor, dataType>(descriptor, info) +{ + m_Data.ValidateInputsOutputs(name, 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(m_Data.m_Parameters); + + // Run the layer + m_PoolingLayer.configure(&input, &output, layerInfo); +} + +template class ClPooling2dBaseWorkload<DataType::Float32>; +template class ClPooling2dBaseWorkload<DataType::QuantisedAsymm8>; + +} diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.hpp b/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.hpp new file mode 100644 index 0000000000..828f000505 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClPooling2dBaseWorkload.hpp @@ -0,0 +1,31 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +arm_compute::Status ClPooling2dWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor); + +// Base class template providing an implementation of the Pooling2d layer common to all data types +template <armnn::DataType dataType> +class ClPooling2dBaseWorkload : public TypedWorkload<Pooling2dQueueDescriptor, dataType> +{ +public: + using TypedWorkload<Pooling2dQueueDescriptor, dataType>::m_Data; + + ClPooling2dBaseWorkload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info, + const std::string& name); + +protected: + mutable arm_compute::CLPoolingLayer m_PoolingLayer; +}; + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.cpp new file mode 100644 index 0000000000..a7f5855b8a --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.cpp @@ -0,0 +1,24 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClPooling2dFloat32Workload.hpp" + +namespace armnn +{ + +ClPooling2dFloat32Workload::ClPooling2dFloat32Workload(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : ClPooling2dBaseWorkload<DataType::Float32>(descriptor, info, "ClPooling2dFloat32Workload") +{ +} + +void ClPooling2dFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClPooling2dFloat32Workload_Execute"); + m_PoolingLayer.run(); +} + +} //namespace armnn + diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.hpp new file mode 100644 index 0000000000..3456a2cff8 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClPooling2dFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" +#include "backends/ClWorkloads//ClPooling2dBaseWorkload.hpp" + +namespace armnn +{ +class ClPooling2dFloat32Workload : public ClPooling2dBaseWorkload<DataType::Float32> +{ +public: + ClPooling2dFloat32Workload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +}; + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.cpp new file mode 100644 index 0000000000..2d2109e252 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.cpp @@ -0,0 +1,25 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "ClPooling2dUint8Workload.hpp" + +namespace armnn +{ + +ClPooling2dUint8Workload::ClPooling2dUint8Workload(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : ClPooling2dBaseWorkload<DataType::QuantisedAsymm8>(descriptor, info, "ClPooling2dUint8Workload") +{ +} + +void ClPooling2dUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClPooling2dUint8Workload_Execute"); + m_PoolingLayer.run(); +} + +} //namespace armnn + + diff --git a/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.hpp new file mode 100644 index 0000000000..0875c7486c --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClPooling2dUint8Workload.hpp @@ -0,0 +1,24 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" +#include "backends/ClWorkloads/ClPooling2dBaseWorkload.hpp" + +namespace armnn +{ + +class ClPooling2dUint8Workload : public ClPooling2dBaseWorkload<DataType::QuantisedAsymm8> +{ +public: + ClPooling2dUint8Workload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +}; + +} //namespace armnn + + diff --git a/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.cpp new file mode 100644 index 0000000000..7b4ad4415b --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.cpp @@ -0,0 +1,31 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClReshapeFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" + +namespace armnn +{ + +ClReshapeFloat32Workload::ClReshapeFloat32Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info) + : Float32Workload<ReshapeQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClReshapeFloat32Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_Layer.configure(&input, &output); +} + +void ClReshapeFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClReshapeFloat32Workload_Execute"); + m_Layer.run(); +} + +} //namespace armnn + diff --git a/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.hpp new file mode 100644 index 0000000000..e344ee08ad --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClReshapeFloat32Workload.hpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +class ClReshapeFloat32Workload : public Float32Workload<ReshapeQueueDescriptor> +{ +public: + ClReshapeFloat32Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info); + + void Execute() const override; + +private: + mutable arm_compute::CLReshapeLayer m_Layer; +}; + +} //namespace armnn + + diff --git a/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.cpp new file mode 100644 index 0000000000..36cc1dec17 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.cpp @@ -0,0 +1,29 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClReshapeUint8Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" + +namespace armnn +{ +ClReshapeUint8Workload::ClReshapeUint8Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info) + : Uint8Workload<ReshapeQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClReshapeUint8Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + m_Layer.configure(&input, &output); +} + +void ClReshapeUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClReshapeUint8Workload_Execute"); + + m_Layer.run(); +} + +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.hpp new file mode 100644 index 0000000000..9e4199098c --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClReshapeUint8Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +// Reshape +class ClReshapeUint8Workload : public Uint8Workload<ReshapeQueueDescriptor> +{ +public: + ClReshapeUint8Workload( const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info); + + void Execute() const override; + +private: + mutable arm_compute::CLReshapeLayer m_Layer; +}; + +} //namespace armnn + + diff --git a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.cpp new file mode 100644 index 0000000000..d71011a2e3 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.cpp @@ -0,0 +1,36 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClResizeBilinearFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ClLayerSupport.hpp" +#include "backends/ArmComputeUtils.hpp" + +namespace armnn +{ + +ClResizeBilinearFloat32Workload::ClResizeBilinearFloat32Workload(const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<ResizeBilinearQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClResizeBilinearFloat32Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_ResizeBilinearLayer.configure(&input, &output, arm_compute::InterpolationPolicy::BILINEAR, + arm_compute::BorderMode::REPLICATE, arm_compute::PixelValue(0.f), + arm_compute::SamplingPolicy::TOP_LEFT); +}; + +void ClResizeBilinearFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClResizeBilinearFloat32Workload_Execute"); + m_ResizeBilinearLayer.run(); +} + + +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.hpp new file mode 100644 index 0000000000..5f70e71619 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClResizeBilinearFloat32Workload.hpp @@ -0,0 +1,23 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +class ClResizeBilinearFloat32Workload : public Float32Workload<ResizeBilinearQueueDescriptor> +{ +public: + ClResizeBilinearFloat32Workload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + mutable arm_compute::CLScale m_ResizeBilinearLayer; +}; + +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.cpp new file mode 100644 index 0000000000..257e76a4df --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.cpp @@ -0,0 +1,29 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClSoftmaxFloat32Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" + +namespace armnn +{ + +ClSoftmaxFloat32Workload::ClSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info) + : Float32Workload<SoftmaxQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClSoftmaxFloat32Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta); +} + +void ClSoftmaxFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClSoftmaxFloat32Workload_Execute"); + m_SoftmaxLayer.run(); +} + +} //namespace armnn
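For reference, the m_Beta parameter forwarded to CLSoftmaxLayer::configure above scales the logits before normalisation, i.e. softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j). A small, numerically unstabilised reference implementation, included only as documentation (the function name is illustrative, not part of this change):

#include <cmath>
#include <cstddef>
#include <vector>

// Plain reference for the softmax computed by the workload above; no max-subtraction
// stabilisation is applied, so this documents the math rather than a production kernel.
inline std::vector<float> SoftmaxReference(const std::vector<float>& logits, float beta)
{
    std::vector<float> result(logits.size());
    float sum = 0.0f;
    for (std::size_t i = 0; i < logits.size(); ++i)
    {
        result[i] = std::exp(beta * logits[i]);
        sum += result[i];
    }
    for (float& value : result)
    {
        value /= sum;
    }
    return result;
}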
\ No newline at end of file diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.hpp new file mode 100644 index 0000000000..a26bbe851d --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClSoftmaxFloat32Workload.hpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ + +class ClSoftmaxFloat32Workload : public Float32Workload<SoftmaxQueueDescriptor> +{ +public: + ClSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + mutable arm_compute::CLSoftmaxLayer m_SoftmaxLayer; +}; + +} //namespace armnn + + + diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.cpp new file mode 100644 index 0000000000..9e856fea94 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.cpp @@ -0,0 +1,39 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClSoftmaxUint8Workload.hpp" +#include "backends/ClTensorHandle.hpp" +#include "backends/CpuTensorHandle.hpp" + +namespace armnn +{ + +ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info) + : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("ClSoftmaxUint8Workload", 1, 1); + + arm_compute::ICLTensor& input = static_cast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ICLTensor& output = static_cast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + const auto outputQuantization = output.info()->quantization_info(); + + if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0)) + { + throw InvalidArgumentException( + "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported"); + } + + m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta); +} + +void ClSoftmaxUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClSoftmaxUint8Workload_Execute"); + + m_SoftmaxLayer.run(); +} + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.hpp new file mode 100644 index 0000000000..07ee6256d8 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClSoftmaxUint8Workload.hpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/ClWorkloadUtils.hpp" + +namespace armnn +{ +// Softmax +class ClSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor> +{ +public: + ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info); + + void Execute() const override; +private: + + mutable arm_compute::CLSoftmaxLayer m_SoftmaxLayer; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.cpp b/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.cpp new file mode 100644 index 0000000000..6221d56766 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.cpp @@ -0,0 +1,17 @@ +// +// Copyright © 2017 Arm Ltd. 
All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClSplitterFloat32Workload.hpp" + +namespace armnn +{ + +void ClSplitterFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClSplitterFloat32Workload_Execute"); + ClBaseSplitterWorkload::Execute(); +} + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.hpp b/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.hpp new file mode 100644 index 0000000000..cfc7eaa3c2 --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClSplitterFloat32Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "ClBaseSplitterWorkload.hpp" + +namespace armnn +{ + +class ClSplitterFloat32Workload : public ClBaseSplitterWorkload<DataType::Float32> +{ +public: + using ClBaseSplitterWorkload<DataType::Float32>::ClBaseSplitterWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.cpp b/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.cpp new file mode 100644 index 0000000000..3aa470894c --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.cpp @@ -0,0 +1,17 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ClSplitterUint8Workload.hpp" + +namespace armnn +{ + +void ClSplitterUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "ClSplitterUint8Workload_Execute"); + ClBaseSplitterWorkload::Execute(); +} + +} //namespace armnn diff --git a/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.hpp b/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.hpp new file mode 100644 index 0000000000..ed8b3cc69e --- /dev/null +++ b/src/armnn/backends/ClWorkloads/ClSplitterUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "ClBaseSplitterWorkload.hpp" + +namespace armnn +{ +class ClSplitterUint8Workload : public ClBaseSplitterWorkload<DataType::QuantisedAsymm8> +{ +public: + using ClBaseSplitterWorkload<DataType::QuantisedAsymm8>::ClBaseSplitterWorkload; + virtual void Execute() const override; +}; +} //namespace armnn + + + diff --git a/src/armnn/backends/CpuTensorHandle.cpp b/src/armnn/backends/CpuTensorHandle.cpp new file mode 100644 index 0000000000..dd8176c9ec --- /dev/null +++ b/src/armnn/backends/CpuTensorHandle.cpp @@ -0,0 +1,107 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "armnn/Exceptions.hpp" +#include "CpuTensorHandle.hpp" + +#include <cstring> + +namespace armnn +{ + +ConstCpuTensorHandle::ConstCpuTensorHandle(const TensorInfo& tensorInfo) +: m_TensorInfo(tensorInfo) +, m_Memory(nullptr) +{ +} + +template <> +const void* ConstCpuTensorHandle::GetConstTensor() const +{ + return m_Memory; +} + +CpuTensorHandle::CpuTensorHandle(const TensorInfo& tensorInfo) +: ConstCpuTensorHandle(tensorInfo) +, m_MutableMemory(nullptr) +{ +} + +template <> +void* CpuTensorHandle::GetTensor() const +{ + return m_MutableMemory; +} + +ScopedCpuTensorHandle::ScopedCpuTensorHandle(const TensorInfo& tensorInfo) +: CpuTensorHandle(tensorInfo) +{ +} + +ScopedCpuTensorHandle::ScopedCpuTensorHandle(const ConstTensor& tensor) +: ScopedCpuTensorHandle(tensor.GetInfo()) +{ + CopyFrom(tensor.GetMemoryArea(), tensor.GetNumBytes()); +} + +ScopedCpuTensorHandle::ScopedCpuTensorHandle(const ScopedCpuTensorHandle& other) +: CpuTensorHandle(other.GetTensorInfo()) +{ + CopyFrom(other); +} + +ScopedCpuTensorHandle& ScopedCpuTensorHandle::operator=(const ScopedCpuTensorHandle& other) +{ + ::operator delete(GetTensor<void>()); + SetMemory(nullptr); + CopyFrom(other); + return *this; +} + +ScopedCpuTensorHandle::~ScopedCpuTensorHandle() +{ + ::operator delete(GetTensor<void>()); +} + +void ScopedCpuTensorHandle::Allocate() +{ + if (GetTensor<void>() == nullptr) + { + SetMemory(::operator new(GetTensorInfo().GetNumBytes())); + } + else + { + throw InvalidArgumentException("CpuTensorHandle::Allocate Trying to allocate a CpuTensorHandle" + "that already has allocated memory."); + } +} + +void ScopedCpuTensorHandle::CopyFrom(const ScopedCpuTensorHandle& other) +{ + CopyFrom(other.GetTensor<void>(), other.GetTensorInfo().GetNumBytes()); +} + +void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes) +{ + BOOST_ASSERT(GetTensor<void>() == nullptr); + BOOST_ASSERT(GetTensorInfo().GetNumBytes() == numBytes); + + if (srcMemory) + { + Allocate(); + memcpy(GetTensor<void>(), srcMemory, numBytes); + } +} + +void PassthroughCpuTensorHandle::Allocate() +{ + throw InvalidArgumentException("PassthroughCpuTensorHandle::Allocate() should never be called"); +} + +void ConstPassthroughCpuTensorHandle::Allocate() +{ + throw InvalidArgumentException("ConstPassthroughCpuTensorHandle::Allocate() should never be called"); +} + +} // namespace armnn diff --git a/src/armnn/backends/CpuTensorHandle.hpp b/src/armnn/backends/CpuTensorHandle.hpp new file mode 100644 index 0000000000..4bf4439083 --- /dev/null +++ b/src/armnn/backends/CpuTensorHandle.hpp @@ -0,0 +1,142 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once +#include "CpuTensorHandleFwd.hpp" + +#include "armnn/TypesUtils.hpp" + +#include "OutputHandler.hpp" + +namespace armnn +{ + +// Abstract tensor handle wrapping a CPU-readable region of memory, interpreting it as tensor data. 
+class ConstCpuTensorHandle : public ITensorHandle +{ +public: + template <typename T> + const T* GetConstTensor() const + { + BOOST_ASSERT(GetTensorInfo().GetDataType() == GetDataType<T>()); + return reinterpret_cast<const T*>(m_Memory); + } + + const TensorInfo& GetTensorInfo() const + { + return m_TensorInfo; + } + + virtual ITensorHandle::Type GetType() const override + { + return ITensorHandle::Cpu; + } + +protected: + ConstCpuTensorHandle(const TensorInfo& tensorInfo); + + void SetConstMemory(const void* mem) { m_Memory = mem; } + +private: + ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete; + ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete; + + TensorInfo m_TensorInfo; + const void* m_Memory; +}; + +// Abstract specialization of ConstCpuTensorHandle that allows write access to the same data +class CpuTensorHandle : public ConstCpuTensorHandle +{ +public: + template <typename T> + T* GetTensor() const + { + BOOST_ASSERT(GetTensorInfo().GetDataType() == GetDataType<T>()); + return reinterpret_cast<T*>(m_MutableMemory); + } + +protected: + CpuTensorHandle(const TensorInfo& tensorInfo); + + void SetMemory(void* mem) + { + m_MutableMemory = mem; + SetConstMemory(m_MutableMemory); + } + +private: + + CpuTensorHandle(const CpuTensorHandle& other) = delete; + CpuTensorHandle& operator=(const CpuTensorHandle& other) = delete; + void* m_MutableMemory; +}; + +// A CpuTensorHandle that owns the wrapped memory region. +class ScopedCpuTensorHandle : public CpuTensorHandle +{ +public: + explicit ScopedCpuTensorHandle(const TensorInfo& tensorInfo); + + // Copies contents from Tensor + explicit ScopedCpuTensorHandle(const ConstTensor& tensor); + + ScopedCpuTensorHandle(const ScopedCpuTensorHandle& other); + ScopedCpuTensorHandle& operator=(const ScopedCpuTensorHandle& other); + ~ScopedCpuTensorHandle(); + + virtual void Allocate() override; + +private: + void CopyFrom(const ScopedCpuTensorHandle& other); + void CopyFrom(const void* srcMemory, unsigned int numBytes); +}; + +// A CpuTensorHandle that wraps an already allocated memory region. +// +// Clients must make sure the passed in memory region stays alive for the lifetime of +// the PassthroughCpuTensorHandle instance. +// +// Note there is no polymorphism to/from ConstPassthroughCpuTensorHandle +class PassthroughCpuTensorHandle : public CpuTensorHandle +{ +public: + PassthroughCpuTensorHandle(const TensorInfo& tensorInfo, void* mem) + : CpuTensorHandle(tensorInfo) + { + SetMemory(mem); + } + + virtual void Allocate() override; +}; + +// A ConstCpuTensorHandle that wraps an already allocated memory region. +// +// This allows users to pass in const memory to a network. +// Clients must make sure the passed in memory region stays alive for the lifetime of +// the PassthroughCpuTensorHandle instance. 
+// +// Note there is no polymorphism to/from PassthroughCpuTensorHandle +class ConstPassthroughCpuTensorHandle : public ConstCpuTensorHandle +{ +public: + ConstPassthroughCpuTensorHandle(const TensorInfo& tensorInfo, const void* mem) + : ConstCpuTensorHandle(tensorInfo) + { + SetConstMemory(mem); + } + + virtual void Allocate() override; +}; + + +// template specializations + +template <> +const void* ConstCpuTensorHandle::GetConstTensor() const; + +template <> +void* CpuTensorHandle::GetTensor() const; + +} // namespace armnn diff --git a/src/armnn/backends/CpuTensorHandleFwd.hpp b/src/armnn/backends/CpuTensorHandleFwd.hpp new file mode 100644 index 0000000000..93e9a7948c --- /dev/null +++ b/src/armnn/backends/CpuTensorHandleFwd.hpp @@ -0,0 +1,16 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ + +class ConstCpuTensorHandle; +class CpuTensorHandle; +class ScopedCpuTensorHandle; +class PassthroughCpuTensorHandle; +class ConstPassthroughCpuTensorHandle; + +} // namespace armnn diff --git a/src/armnn/backends/ITensorHandle.hpp b/src/armnn/backends/ITensorHandle.hpp new file mode 100644 index 0000000000..b95dcc65e0 --- /dev/null +++ b/src/armnn/backends/ITensorHandle.hpp @@ -0,0 +1,25 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ + +class ITensorHandle +{ +public: + enum Type + { + Cpu, + CL, + Neon + }; + + virtual ~ITensorHandle(){} + virtual void Allocate() = 0; + virtual ITensorHandle::Type GetType() const = 0; +}; + +} diff --git a/src/armnn/backends/MakeWorkloadHelper.hpp b/src/armnn/backends/MakeWorkloadHelper.hpp new file mode 100644 index 0000000000..a8729eb07c --- /dev/null +++ b/src/armnn/backends/MakeWorkloadHelper.hpp @@ -0,0 +1,59 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ +namespace +{ + +// Make a workload of the specified WorkloadType +template<typename WorkloadType> +struct MakeWorkloadForType +{ + template<typename QueueDescriptorType> + static std::unique_ptr<WorkloadType> Func(const QueueDescriptorType& descriptor, const WorkloadInfo& info) + { + return std::make_unique<WorkloadType>(descriptor, info); + } +}; + +// Specialization for void workload type used for unsupported workloads. +template<> +struct MakeWorkloadForType<NullWorkload> +{ + template<typename QueueDescriptorType> + static std::unique_ptr<NullWorkload> Func(const QueueDescriptorType& descriptor, const WorkloadInfo& info) + { + return nullptr; + } +}; + +// Makes a workload for one the specified types based on the data type requirements of the tensorinfo. +// Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos. +template <typename Float32Workload, typename Uint8Workload, typename QueueDescriptorType> +std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info) +{ + const DataType dataType = !info.m_InputTensorInfos.empty() ? 
+ info.m_InputTensorInfos[0].GetDataType() + : info.m_OutputTensorInfos[0].GetDataType(); + + BOOST_ASSERT(info.m_InputTensorInfos.empty() || info.m_OutputTensorInfos.empty() + || info.m_InputTensorInfos[0].GetDataType() == info.m_OutputTensorInfos[0].GetDataType()); + + switch (dataType) + { + case DataType::Float32: + return MakeWorkloadForType<Float32Workload>::Func(descriptor, info); + case DataType::QuantisedAsymm8: + return MakeWorkloadForType<Uint8Workload>::Func(descriptor, info); + default: + BOOST_ASSERT_MSG(false, "Unknown DataType."); + return nullptr; + } +} + +} //namespace +} //namespace armnn
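For context, MakeWorkload gives each backend factory a one-line way to dispatch on the tensor data type: Float32 selects the first workload type, QuantisedAsymm8 the second, and passing NullWorkload yields nullptr for unsupported combinations. A minimal usage sketch, mirroring the NeonWorkloadFactory methods defined later in this change:

    std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
    {
        // Picks NeonSoftmaxFloat32Workload or NeonSoftmaxUint8Workload based on the input data type.
        return MakeWorkload<NeonSoftmaxFloat32Workload, NeonSoftmaxUint8Workload>(descriptor, info);
    }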
\ No newline at end of file diff --git a/src/armnn/backends/MemCopyWorkload.cpp b/src/armnn/backends/MemCopyWorkload.cpp new file mode 100644 index 0000000000..09ffd9a08a --- /dev/null +++ b/src/armnn/backends/MemCopyWorkload.cpp @@ -0,0 +1,256 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "MemCopyWorkload.hpp" +#include "backends/CpuTensorHandle.hpp" + +#if ARMCOMPUTECL_ENABLED +#include "backends/ClTensorHandle.hpp" +#endif + +#if ARMCOMPUTENEON_ENABLED +#include "backends/NeonTensorHandle.hpp" +#endif + +#include <cstring> +#include <boost/cast.hpp> + +namespace armnn +{ + +namespace +{ + +template <typename SrcTensorHandleType, typename DstTensorHandleType> +void GatherTensorHandlePairs(const MemCopyQueueDescriptor& descriptor, + std::vector<std::pair<SrcTensorHandleType*, DstTensorHandleType*>>& tensorHandlePairs) +{ + const unsigned int numInputs = boost::numeric_cast<unsigned int>(descriptor.m_Inputs.size()); + tensorHandlePairs.reserve(numInputs); + + for (unsigned int i = 0; i < numInputs; ++i) + { + SrcTensorHandleType* const srcTensorHandle = boost::polymorphic_downcast<SrcTensorHandleType*>( + descriptor.m_Inputs[i]); + DstTensorHandleType* const dstTensorHandle = boost::polymorphic_downcast<DstTensorHandleType*>( + descriptor.m_Outputs[i]); + + tensorHandlePairs.emplace_back(srcTensorHandle, dstTensorHandle); + } +} + +void CopyFromCpuToCpu(const ConstCpuTensorHandle& srcHandle, CpuTensorHandle& dstHandle) +{ + const unsigned int numBytes = srcHandle.GetTensorInfo().GetNumBytes(); + const void* const input = srcHandle.GetConstTensor<void>(); + void* const output = dstHandle.GetTensor<void>(); + std::memcpy(output, input, numBytes); +} + +#if ARMCOMPUTECL_ENABLED || ARMCOMPUTENEON_ENABLED + +#include "backends/ArmComputeTensorUtils.hpp" + +template <armnn::DataType DataType> +void CopyFromCpuToAclBackend(const ConstCpuTensorHandle& srcHandle, arm_compute::ITensor& dstAclTensor) +{ + using T = ResolveType<DataType>; + armnn::armcomputetensorutils::CopyArmComputeITensorData(srcHandle.GetConstTensor<T>(), dstAclTensor); +} + +template <armnn::DataType DataType> +void CopyFromAclBackendToCpu(const arm_compute::ITensor& srcAclTensor, CpuTensorHandle& dstHandle) +{ + using T = ResolveType<DataType>; + armnn::armcomputetensorutils::CopyArmComputeITensorData(srcAclTensor, dstHandle.GetTensor<T>()); +} + +#endif // ARMCOMPUTECL_ENABLED || ARMCOMPUTENEON_ENABLED + +} + +template <armnn::DataType DataType> +CopyFromCpuToCpuWorkload<DataType>::CopyFromCpuToCpuWorkload(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<MemCopyQueueDescriptor, DataType>(descriptor, info) +{ + GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); +} + +template <armnn::DataType DataType> +void CopyFromCpuToCpuWorkload<DataType>::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "CopyFromCpuToCpuWorkload_Execute"); + + for (const auto& pair : m_TensorHandlePairs) + { + CopyFromCpuToCpu(*pair.first, *pair.second); + } +} + +template class CopyFromCpuToCpuWorkload<DataType::Float32>; +template class CopyFromCpuToCpuWorkload<DataType::QuantisedAsymm8>; + +#if ARMCOMPUTECL_ENABLED + +template <armnn::DataType DataType> +CopyFromCpuToClWorkload<DataType>::CopyFromCpuToClWorkload(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<MemCopyQueueDescriptor, DataType>(descriptor, info) +{ + GatherTensorHandlePairs(descriptor, 
m_TensorHandlePairs); +} + +template <armnn::DataType DataType> +void CopyFromCpuToClWorkload<DataType>::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "CopyFromCpuToClWorkload_Execute"); + + for (const auto& pair : m_TensorHandlePairs) + { + IClTensorHandle& handle = *pair.second; + + handle.Map(true); + CopyFromCpuToAclBackend<DataType>(*pair.first, handle.GetTensor()); + handle.UnMap(); + } +} + +template class CopyFromCpuToClWorkload<DataType::Float32>; +template class CopyFromCpuToClWorkload<DataType::QuantisedAsymm8>; + + +template <armnn::DataType DataType> +CopyFromClToCpuWorkload<DataType>::CopyFromClToCpuWorkload(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<MemCopyQueueDescriptor, DataType>(descriptor, info) +{ + GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); +} + +template <armnn::DataType DataType> +void CopyFromClToCpuWorkload<DataType>::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "CopyFromClToCpuWorkload_Execute"); + + for (const auto& pair : m_TensorHandlePairs) + { + IClTensorHandle& handle = *pair.first; + + handle.Map(true); + CopyFromAclBackendToCpu<DataType>(handle.GetTensor(), *pair.second); + handle.UnMap(); + } +} + +template class CopyFromClToCpuWorkload<DataType::Float32>; +template class CopyFromClToCpuWorkload<DataType::QuantisedAsymm8>; + +#endif // ARMCOMPUTECL_ENABLED + +#if ARMCOMPUTENEON_ENABLED + +template <armnn::DataType DataType> +CopyFromCpuToNeonWorkload<DataType>::CopyFromCpuToNeonWorkload(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<MemCopyQueueDescriptor, DataType>(descriptor, info) +{ + GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); +} + +template <armnn::DataType DataType> +void CopyFromCpuToNeonWorkload<DataType>::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "CopyFromCpuToNeonWorkload_Execute"); + + for (const auto& pair : m_TensorHandlePairs) + { + CopyFromCpuToAclBackend<DataType>(*pair.first, pair.second->GetTensor()); + } +} + +template class CopyFromCpuToNeonWorkload<DataType::Float32>; +template class CopyFromCpuToNeonWorkload<DataType::QuantisedAsymm8>; + +template <armnn::DataType DataType> +CopyFromNeonToCpuWorkload<DataType>::CopyFromNeonToCpuWorkload(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<MemCopyQueueDescriptor, DataType>(descriptor, info) +{ + GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); +} + +template <armnn::DataType DataType> +void CopyFromNeonToCpuWorkload<DataType>::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "CopyFromNeonToCpuWorkload_Execute"); + + for (const auto& pair : m_TensorHandlePairs) + { + CopyFromAclBackendToCpu<DataType>(pair.first->GetTensor(), *pair.second); + } +} + +template class CopyFromNeonToCpuWorkload<DataType::Float32>; +template class CopyFromNeonToCpuWorkload<DataType::QuantisedAsymm8>; + +#endif // ARMCOMPUTENEON_ENABLED + +#if ARMCOMPUTECL_ENABLED && ARMCOMPUTENEON_ENABLED + +template <armnn::DataType DataType> +CopyFromNeonToClWorkload<DataType>::CopyFromNeonToClWorkload(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<MemCopyQueueDescriptor, DataType>(descriptor, info) +{ + GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); +} + +template <armnn::DataType DataType> +void CopyFromNeonToClWorkload<DataType>::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, 
"CopyFromNeonToClWorkload_Execute"); + + for (const auto& pair : m_TensorHandlePairs) + { + IClTensorHandle& handle = *pair.second; + + handle.Map(true); + handle.GetTensor().copy_from(pair.first->GetTensor()); + handle.UnMap(); + } +} + +template class CopyFromNeonToClWorkload<DataType::Float32>; +template class CopyFromNeonToClWorkload<DataType::QuantisedAsymm8>; + +template <armnn::DataType DataType> +CopyFromClToNeonWorkload<DataType>::CopyFromClToNeonWorkload(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<MemCopyQueueDescriptor, DataType>(descriptor, info) +{ + GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); +} + +template <armnn::DataType DataType> +void CopyFromClToNeonWorkload<DataType>::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "CopyFromClToNeonWorkload_Execute"); + + for (const auto& pair : m_TensorHandlePairs) + { + IClTensorHandle& handle = *pair.first; + + handle.Map(true); + pair.second->GetTensor().copy_from(handle.GetTensor()); + handle.UnMap(); + } +} + +template class CopyFromClToNeonWorkload<DataType::Float32>; +template class CopyFromClToNeonWorkload<DataType::QuantisedAsymm8>; + +#endif // ARMCOMPUTECL_ENABLED && ARMCOMPUTENEON_ENABLED + +} diff --git a/src/armnn/backends/MemCopyWorkload.hpp b/src/armnn/backends/MemCopyWorkload.hpp new file mode 100644 index 0000000000..7fcaf138c3 --- /dev/null +++ b/src/armnn/backends/MemCopyWorkload.hpp @@ -0,0 +1,136 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "CpuTensorHandleFwd.hpp" +#include "backends/Workload.hpp" + +#include <utility> + +namespace armnn +{ + +template <armnn::DataType DataType> +class CopyFromCpuToCpuWorkload : public TypedWorkload<MemCopyQueueDescriptor, DataType> +{ +public: + CopyFromCpuToCpuWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + using TensorHandlePair = std::pair<const ConstCpuTensorHandle*, CpuTensorHandle*>; + std::vector<TensorHandlePair> m_TensorHandlePairs; +}; + +using CopyFromCpuToCpuFloat32Workload = CopyFromCpuToCpuWorkload<DataType::Float32>; +using CopyFromCpuToCpuUint8Workload = CopyFromCpuToCpuWorkload<DataType::QuantisedAsymm8>; + +#if ARMCOMPUTECL_ENABLED + +class IClTensorHandle; + +template <armnn::DataType DataType> +class CopyFromCpuToClWorkload : public TypedWorkload<MemCopyQueueDescriptor, DataType> +{ +public: + CopyFromCpuToClWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + using TensorHandlePair = std::pair<const ConstCpuTensorHandle*, IClTensorHandle*>; + std::vector<TensorHandlePair> m_TensorHandlePairs; +}; + +using CopyFromCpuToClFloat32Workload = CopyFromCpuToClWorkload<DataType::Float32>; +using CopyFromCpuToClUint8Workload = CopyFromCpuToClWorkload<DataType::QuantisedAsymm8>; + +template <armnn::DataType DataType> +class CopyFromClToCpuWorkload : public TypedWorkload<MemCopyQueueDescriptor, DataType> +{ +public: + CopyFromClToCpuWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + using TensorHandlePair = std::pair<IClTensorHandle*, CpuTensorHandle*>; + std::vector<TensorHandlePair> m_TensorHandlePairs; +}; + +using CopyFromClToCpuFloat32Workload = CopyFromClToCpuWorkload<DataType::Float32>; +using CopyFromClToCpuUint8Workload = 
CopyFromClToCpuWorkload<DataType::QuantisedAsymm8>; + +#endif // ARMCOMPUTECL_ENABLED + +#if ARMCOMPUTENEON_ENABLED + +class INeonTensorHandle; + +template <armnn::DataType DataType> +class CopyFromCpuToNeonWorkload : public TypedWorkload<MemCopyQueueDescriptor, DataType> +{ +public: + CopyFromCpuToNeonWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +protected: + using TensorHandlePair = std::pair<const ConstCpuTensorHandle*, INeonTensorHandle*>; + std::vector<TensorHandlePair> m_TensorHandlePairs; +}; + +using CopyFromCpuToNeonFloat32Workload = CopyFromCpuToNeonWorkload<DataType::Float32>; +using CopyFromCpuToNeonUint8Workload = CopyFromCpuToNeonWorkload<DataType::QuantisedAsymm8>; + +template <armnn::DataType DataType> +class CopyFromNeonToCpuWorkload : public TypedWorkload<MemCopyQueueDescriptor, DataType> +{ +public: + CopyFromNeonToCpuWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +protected: + using TensorHandlePair = std::pair<const INeonTensorHandle*, CpuTensorHandle*>; + std::vector<TensorHandlePair> m_TensorHandlePairs; +}; + +using CopyFromNeonToCpuFloat32Workload = CopyFromNeonToCpuWorkload<DataType::Float32>; +using CopyFromNeonToCpuUint8Workload = CopyFromNeonToCpuWorkload<DataType::QuantisedAsymm8>; + +#endif + +#if ARMCOMPUTECL_ENABLED && ARMCOMPUTENEON_ENABLED + +template <armnn::DataType DataType> +class CopyFromNeonToClWorkload : public TypedWorkload<MemCopyQueueDescriptor, DataType> +{ +public: + CopyFromNeonToClWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + using TensorHandlePair = std::pair<const INeonTensorHandle*, IClTensorHandle*>; + std::vector<TensorHandlePair> m_TensorHandlePairs; +}; + +using CopyFromNeonToClFloat32Workload = CopyFromNeonToClWorkload<DataType::Float32>; +using CopyFromNeonToClUint8Workload = CopyFromNeonToClWorkload<DataType::QuantisedAsymm8>; + +template <armnn::DataType DataType> +class CopyFromClToNeonWorkload : public TypedWorkload<MemCopyQueueDescriptor, DataType> +{ +public: + CopyFromClToNeonWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + using TensorHandlePair = std::pair<IClTensorHandle*, INeonTensorHandle*>; + std::vector<TensorHandlePair> m_TensorHandlePairs; +}; + +using CopyFromClToNeonFloat32Workload = CopyFromClToNeonWorkload<DataType::Float32>; +using CopyFromClToNeonUint8Workload = CopyFromClToNeonWorkload<DataType::QuantisedAsymm8>; + +#endif + +} diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp new file mode 100644 index 0000000000..382b15e277 --- /dev/null +++ b/src/armnn/backends/NeonLayerSupport.cpp @@ -0,0 +1,398 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonLayerSupport.hpp" + +#include "LayerSupportCommon.hpp" +#include "InternalTypes.hpp" + +#include <armnn/Descriptors.hpp> +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> + +#include <boost/core/ignore_unused.hpp> + +#ifdef ARMCOMPUTENEON_ENABLED +#include "NeonWorkloads/NeonPooling2dBaseWorkload.hpp" +#include "NeonWorkloads/NeonPermuteWorkload.hpp" +#endif + +using namespace boost; + +namespace armnn +{ +bool IsNeonActivationUint8Supported(std::string* reasonIfUnsupported, const ActivationDescriptor& parameters) +{ + if (parameters.m_Function != ActivationFunction::BoundedReLu) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Unsupported activation function, only BoundedReLu is supported)"; + } + + return false; + } + + return true; +} + +bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc) +{ + // See arm_compute::NEDirectConvolutionLayer documentation for the supported cases, + // and complement with NEDirectConvolutionLayerKernel::configure() implementation + + // Only 1x1 is using direct convolution. Performance results and details are in: + // https://jira.arm.com/browse/IVGCVSW-1003 + // Measurements were taken as of clframework: f105ab972135bcd21304883eff040d7e587099bc + + const bool dataTypeSupported = (weightInfo.GetDataType() == armnn::DataType::Float32); + + // Strides: 1|2|3 + const bool strideSupported = (desc.m_StrideX == 1 || desc.m_StrideX == 2 || desc.m_StrideX == 3) && + (desc.m_StrideY == 1 || desc.m_StrideY == 2 || desc.m_StrideY == 3); + + auto paddingLargerThan = [](const Convolution2dDescriptor& desc, unsigned int value) + { + return desc.m_PadLeft > value || desc.m_PadRight > value || desc.m_PadTop > value || desc.m_PadBottom > value; + }; + + // Supported sizes and padding + const bool sizeAndPaddingSupported = + // Pad > 0 not supported for 1x1 weights + (weightInfo.GetShape()[2] == 1 && weightInfo.GetShape()[3] == 1 && !paddingLargerThan(desc, 0u)); + + const bool preferDirectConvolution = dataTypeSupported && + strideSupported && + sizeAndPaddingSupported && + // NEDirectConvolutionLayerKernel doesn't support NULL bias + desc.m_BiasEnabled; + return preferDirectConvolution; +} + +bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported, const NormalizationDescriptor& parameters) +{ + if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Unsupported normalisation method type, only LocalBrightness is supported"; + } + return false; + } + if (parameters.m_NormSize % 2 == 0) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Normalization size must be an odd number."; + } + return false; + } + + return true; +} + +bool IsNeonBackendSupported(std::string* reasonIfUnsupported) +{ +#if ARMCOMPUTENEON_ENABLED + return true; +#else + if (reasonIfUnsupported != nullptr) + { + *reasonIfUnsupported = "The armnn library has been built without NEON support"; + } + return false; +#endif +} + +template<typename Float32Func, typename Uint8Func, typename ... Params> +bool IsSupportedForDataTypeNeon(std::string* reasonIfUnsupported, + DataType dataType, + Float32Func floatFuncPtr, + Uint8Func uint8FuncPtr, + Params&&... 
params) +{ + return IsNeonBackendSupported(reasonIfUnsupported) && + IsSupportedForDataTypeGeneric(reasonIfUnsupported, + dataType, + floatFuncPtr, + uint8FuncPtr, + std::forward<Params>(params)...); +} + +#if ARMCOMPUTENEON_ENABLED +template<class FuncType, class... Args> +inline bool IsWorkloadSupported(FuncType& func, std::string* reasonIfUnsupported, Args&&... args) +{ + arm_compute::Status aclStatus = func(std::forward<Args>(args)...); + const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK); + if (!supported && reasonIfUnsupported) + { + *reasonIfUnsupported = aclStatus.error_description(); + } + return supported; +} + +#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \ + return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__); +#else +#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \ + return IsNeonBackendSupported(reasonIfUnsupported); +#endif + +bool IsActivationSupportedNeon(const TensorInfo& input, + const ActivationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<const ActivationDescriptor&>, + &IsNeonActivationUint8Supported, + descriptor); +} + +bool IsNeonDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported, + const DepthwiseConvolution2dDescriptor& parameters, + const TensorInfo& weights) +{ + ignore_unused(weights); + + if (parameters.m_StrideX < 1 || parameters.m_StrideX > 3) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "m_StrideX can only be 1, 2 or 3"; + } + return false; + } + + // weights.GetShape()[0] = channel multiplier + if (weights.GetShape()[0] != 1) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Channel multiplier only supports the value 1 in the NEON backend"; + } + return false; + } + + if (parameters.m_PadLeft != parameters.m_PadRight || parameters.m_PadTop != parameters.m_PadBottom) + { + if (reasonIfUnsupported) + { + *reasonIfUnsupported = "Asymmetric padding for depthwise convolution currently not supported " + "in Neon backend"; + } + return false; + } + + return true; +} + +bool IsAdditionSupportedNeon(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + ignore_unused(input1); + ignore_unused(output); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input0.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsBatchNormalizationSupportedNeon(const TensorInfo& input, + const BatchNormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsConstantSupportedNeon(const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + output.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsConvolution2dSupportedNeon(const TensorInfo& input, + const Convolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported) +{ 
+ return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &IsNeonDepthwiseConvolution2dDescParamsSupported, + &IsNeonDepthwiseConvolution2dDescParamsSupported, + descriptor, + weights); +} + +bool IsFullyConnectedSupportedNeon(const TensorInfo& input, + const FullyConnectedDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsInputSupportedNeon(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsL2NormalizationSupportedNeon(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFunc<>); +} + +bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs, + const OriginsDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + inputs[0]->GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsMultiplicationSupportedNeon(const TensorInfo& input0, + const TensorInfo& input1, + std::string* reasonIfUnsupported) +{ + ignore_unused(input1); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input0.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsNormalizationSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &IsNeonNormalizationDescParamsSupported, + &FalseFuncU8<const NormalizationDescriptor&>, + descriptor); +} + +bool IsOutputSupportedNeon(const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + output.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsPermuteSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor); +} + +bool IsPooling2dSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor); +} + +bool IsResizeBilinearSupportedNeon(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + ignore_unused(input); + return false; +} + +bool IsSoftmaxSupportedNeon(const TensorInfo& input, + const SoftmaxDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsSplitterSupportedNeon(const TensorInfo& input, + const ViewsDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsFakeQuantizationSupportedNeon(const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(input); + ignore_unused(descriptor); + return 
false; +} + +bool IsReshapeSupportedNeon(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsFloorSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + ignore_unused(output); + return IsSupportedForDataTypeNeon(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +} diff --git a/src/armnn/backends/NeonLayerSupport.hpp b/src/armnn/backends/NeonLayerSupport.hpp new file mode 100644 index 0000000000..b2ac49ae0d --- /dev/null +++ b/src/armnn/backends/NeonLayerSupport.hpp @@ -0,0 +1,109 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/DescriptorsFwd.hpp> +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +bool IsNeonActivationUint8Supported(std::string* reasonIfUnsupported, const ActivationDescriptor& parameters); + +bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc); + +bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported, + const NormalizationDescriptor& parameters); + +bool IsActivationSupportedNeon(const TensorInfo& input, + const ActivationDescriptor& descriptor, + std::string* reasonIfUnsupported); + +bool IsNeonDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported, + const DepthwiseConvolution2dDescriptor& parameters, + const TensorInfo& weights); + +bool IsAdditionSupportedNeon(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported); + +bool IsBatchNormalizationSupportedNeon(const TensorInfo& input, + const BatchNormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsConstantSupportedNeon(const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +bool IsConvolution2dSupportedNeon(const TensorInfo& input, + const Convolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported = nullptr); + +bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported = nullptr); + +bool IsFullyConnectedSupportedNeon(const TensorInfo& input, + const FullyConnectedDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsInputSupportedNeon(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsL2NormalizationSupportedNeon(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs, + const OriginsDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsMultiplicationSupportedNeon(const TensorInfo& input0, + const TensorInfo& input1, + std::string* reasonIfUnsupported = nullptr); + +bool IsNormalizationSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsOutputSupportedNeon(const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +bool IsPermuteSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + std::string* reasonIfUnsupported = 
nullptr); + +bool IsPooling2dSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsResizeBilinearSupportedNeon(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsSoftmaxSupportedNeon(const TensorInfo& input, + const SoftmaxDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsSplitterSupportedNeon(const TensorInfo& input, + const ViewsDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsFakeQuantizationSupportedNeon(const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsReshapeSupportedNeon(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsFloorSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +} diff --git a/src/armnn/backends/NeonTensorHandle.hpp b/src/armnn/backends/NeonTensorHandle.hpp new file mode 100644 index 0000000000..684a5e1bfc --- /dev/null +++ b/src/armnn/backends/NeonTensorHandle.hpp @@ -0,0 +1,80 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "OutputHandler.hpp" +#include "ArmComputeTensorUtils.hpp" + +#include <arm_compute/runtime/Tensor.h> +#include <arm_compute/runtime/SubTensor.h> +#include <arm_compute/core/TensorShape.h> +#include <arm_compute/core/Coordinates.h> + + +namespace armnn +{ + +class INeonTensorHandle : public ITensorHandle +{ +public: + virtual arm_compute::ITensor& GetTensor() = 0; + virtual arm_compute::ITensor const& GetTensor() const = 0; + virtual arm_compute::DataType GetDataType() const = 0; +}; + +class NeonTensorHandle : public INeonTensorHandle +{ +public: + NeonTensorHandle(const TensorInfo& tensorInfo) + { + armnn::armcomputetensorutils::BuildArmComputeTensor(m_Tensor, tensorInfo); + } + + arm_compute::ITensor& GetTensor() override { return m_Tensor; } + arm_compute::ITensor const& GetTensor() const override { return m_Tensor; } + virtual void Allocate() override + { + armnn::armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_Tensor); + }; + + virtual ITensorHandle::Type GetType() const override { return ITensorHandle::Neon; } + + virtual arm_compute::DataType GetDataType() const override + { + return m_Tensor.info()->data_type(); + } + +private: + arm_compute::Tensor m_Tensor; +}; + +class NeonSubTensorHandle : public INeonTensorHandle +{ +public: + NeonSubTensorHandle(arm_compute::ITensor& parent, + const arm_compute::TensorShape& shape, + const arm_compute::Coordinates& coords) + : m_Tensor(&parent, shape, coords) + { + } + + arm_compute::ITensor& GetTensor() override { return m_Tensor; } + arm_compute::ITensor const& GetTensor() const override { return m_Tensor; } + virtual void Allocate() override + { + }; + + virtual ITensorHandle::Type GetType() const override { return ITensorHandle::Neon; } + + virtual arm_compute::DataType GetDataType() const override + { + return m_Tensor.info()->data_type(); + } + +private: + arm_compute::SubTensor m_Tensor; +}; + +} diff --git a/src/armnn/backends/NeonWorkloadFactory.cpp b/src/armnn/backends/NeonWorkloadFactory.cpp new file mode 100644 index 0000000000..384284114f --- /dev/null +++ b/src/armnn/backends/NeonWorkloadFactory.cpp @@ -0,0 +1,360 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// See LICENSE file in the project root for full license information. +// +#include "NeonWorkloadFactory.hpp" +#include "armnn/Utils.hpp" +#include "CpuTensorHandle.hpp" +#include "Layer.hpp" +#include "Layers.hpp" + +#ifdef ARMCOMPUTENEON_ENABLED +#include "MemCopyWorkload.hpp" +#include "NeonTensorHandle.hpp" +#include "NeonWorkloadUtils.hpp" +#include "NeonWorkloads.hpp" +#endif + +#include "MakeWorkloadHelper.hpp" + +#include <boost/polymorphic_cast.hpp> + +namespace armnn +{ + +bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported) +{ + return IWorkloadFactory::IsLayerSupported(Compute::CpuAcc, layer, dataType, outReasonIfUnsupported); +} + +#ifdef ARMCOMPUTENEON_ENABLED + +std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent, + TensorShape const& subTensorShape, + unsigned int const* subTensorOrigin) const +{ + BOOST_ASSERT(parent.GetType() == ITensorHandle::Neon); + + const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape); + + arm_compute::Coordinates coords; + coords.set_num_dimensions(subTensorShape.GetNumDimensions()); + for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++) + { + // arm compute indexes tensor coords in reverse order + unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1; + coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex])); + } + + return std::make_unique<NeonSubTensorHandle>(boost::polymorphic_downcast<INeonTensorHandle*>(&parent)->GetTensor(), + shape, coords); +} + +std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const +{ + return std::make_unique<NeonTensorHandle>(tensorInfo); +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<CopyFromCpuToNeonFloat32Workload, CopyFromCpuToNeonUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<CopyFromNeonToCpuFloat32Workload, CopyFromNeonToCpuUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonActivationFloat32Workload, NeonActivationUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonSoftmaxFloat32Workload, NeonSoftmaxUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonSplitterFloat32Workload, NeonSplitterUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonMergerFloat32Workload, NeonMergerUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFullyConnected( + const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<NeonFullyConnectedFloat32Workload, NullWorkload>(descriptor, info); +} + 
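A worked example for the coordinate handling in CreateSubTensorHandle above (values are illustrative): Arm Compute Library indexes tensor coordinates in reverse dimension order, so for a 4-D (N, C, H, W) parent tensor and an armnn sub-tensor origin of {0, 2, 0, 0} the loop produces:

    // i = 0 -> coords[0] = subTensorOrigin[3] = 0   (W)
    // i = 1 -> coords[1] = subTensorOrigin[2] = 0   (H)
    // i = 2 -> coords[2] = subTensorOrigin[1] = 2   (C)
    // i = 3 -> coords[3] = subTensorOrigin[0] = 0   (N)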
+std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonPermuteFloat32Workload, NeonPermuteUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonPooling2dFloat32Workload, NeonPooling2dUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution2d( + const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<NeonConvolution2dFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d( + const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<NeonDepthwiseConvolutionFloat32Workload, NeonDepthwiseConvolutionUint8Workload>( + descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateNormalization( + const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<NeonNormalizationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonAdditionFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMultiplication( + const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<NeonMultiplicationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization( + const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<NeonBatchNormalizationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0]) + { + throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload"); + } + + // Create a workload that will copy tensor data from the inputs, which can have a number of different formats, + // to Neon tensors. 
+ switch (descriptor.m_Inputs[0]->GetType()) + { + case ITensorHandle::Cpu: + return MakeWorkload<CopyFromCpuToNeonFloat32Workload, CopyFromCpuToNeonUint8Workload>(descriptor, info); +#if ARMCOMPUTECL_ENABLED + case ITensorHandle::CL: + { + return MakeWorkload<CopyFromClToNeonFloat32Workload, CopyFromClToNeonUint8Workload>(descriptor, info); + } +#endif + default: + throw InvalidArgumentException("NeonWorkloadFactory: Destination type not supported for MemCopy Workload."); + } +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear( + const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization( + const FakeQuantizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonL2NormalizationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonConstantFloat32Workload, NeonConstantUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonReshapeFloat32Workload, NeonReshapeUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<NeonFloorFloat32Workload, NullWorkload>(descriptor, info); +} + +#else // Compiled without ArmCompute libs + +std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent, + TensorShape const& subTensorShape, + unsigned int const* subTensorOrigin) const +{ + return nullptr; +} + +std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d( + const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchNormalization(const BatchNormalizationQueueDescriptor& data, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& data, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization( + const FakeQuantizationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return nullptr; +} + +#endif + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloadFactory.hpp b/src/armnn/backends/NeonWorkloadFactory.hpp new file mode 100644 index 0000000000..0e39cfe8b1 --- /dev/null +++ b/src/armnn/backends/NeonWorkloadFactory.hpp @@ -0,0 +1,100 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include "WorkloadFactory.hpp" +#include "OutputHandler.hpp" + +#include <boost/core/ignore_unused.hpp> + +namespace armnn +{ + +// Neon workload factory +class NeonWorkloadFactory : public IWorkloadFactory +{ +public: + virtual ~NeonWorkloadFactory() { }; + + virtual Compute GetCompute() const override { return Compute::CpuAcc; } + + static bool IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported); + + virtual bool SupportsSubTensors() const override { return true; } + + virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent, + TensorShape const& subTensorShape, + unsigned int const* subTensorOrigin) const override; + + virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override; + + virtual std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d( + const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateL2Normalization(const 
L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloadUtils.cpp b/src/armnn/backends/NeonWorkloadUtils.cpp new file mode 100644 index 0000000000..0a108a8d38 --- /dev/null +++ b/src/armnn/backends/NeonWorkloadUtils.cpp @@ -0,0 +1,43 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "NeonWorkloadUtils.hpp" +#include "backends/ArmComputeTensorUtils.hpp" +#include "backends/ArmComputeUtils.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/NeonTensorHandle.hpp" + +#include "armnn/Utils.hpp" +#include "armnn/Exceptions.hpp" + +#include "Layers.hpp" + +#include <cstring> +#include <boost/assert.hpp> +#include <boost/cast.hpp> +#include <boost/format.hpp> + +#include "Profiling.hpp" + +#include "NeonLayerSupport.hpp" +#include "../../../include/armnn/Types.hpp" + +using namespace armnn::armcomputetensorutils; + +namespace armnn +{ + +// Allocate a tensor and copy the contents in data to the tensor contents +template<typename T> +void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const T* data) +{ + InitialiseArmComputeTensorEmpty(tensor); + CopyArmComputeITensorData(data, tensor); +} + +template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const float* data); +template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const uint8_t* data); +template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const int32_t* data); + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloadUtils.hpp b/src/armnn/backends/NeonWorkloadUtils.hpp new file mode 100644 index 0000000000..ec7688237a --- /dev/null +++ b/src/armnn/backends/NeonWorkloadUtils.hpp @@ -0,0 +1,25 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Workload.hpp" + +#include "backends/NeonTensorHandle.hpp" + +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/runtime/NEON/NEFunctions.h" +#include <arm_compute/runtime/SubTensor.h> + +#include <boost/cast.hpp> + +namespace armnn +{ +class Layer; + +template<typename T> +void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const T* data); + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads.hpp b/src/armnn/backends/NeonWorkloads.hpp new file mode 100644 index 0000000000..7e9e885adc --- /dev/null +++ b/src/armnn/backends/NeonWorkloads.hpp @@ -0,0 +1,35 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once +#include "backends/NeonWorkloads/NeonActivationFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonActivationUint8Workload.hpp" +#include "backends/NeonWorkloads/NeonAdditionFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonBaseConstantWorkload.hpp" +#include "backends/NeonWorkloads/NeonBaseMergerWorkload.hpp" +#include "backends/NeonWorkloads/NeonBaseSplitterWorkload.hpp" +#include "backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonConstantFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonConstantUint8Workload.hpp" +#include "backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.hpp" +#include "backends/NeonWorkloads/NeonFloorFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonMergerFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonMergerUint8Workload.hpp" +#include "backends/NeonWorkloads/NeonMultiplicationFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonPermuteWorkload.hpp" +#include "backends/NeonWorkloads/NeonPooling2dBaseWorkload.hpp" +#include "backends/NeonWorkloads/NeonPooling2dFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonPooling2dUint8Workload.hpp" +#include "backends/NeonWorkloads/NeonReshapeFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonReshapeUint8Workload.hpp" +#include "backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp" +#include "backends/NeonWorkloads/NeonSplitterFloat32Workload.hpp" +#include "backends/NeonWorkloads/NeonSplitterUint8Workload.hpp" diff --git a/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.cpp new file mode 100644 index 0000000000..39e55d5761 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.cpp @@ -0,0 +1,34 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonActivationFloat32Workload.hpp" +#include "backends/ArmComputeUtils.hpp" + + +namespace armnn +{ +NeonActivationFloat32Workload::NeonActivationFloat32Workload(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<ActivationQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonActivationFloat32Workload", 1, 1); + + const arm_compute::ActivationLayerInfo activationLayerInfo = + ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters); + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_ActivationLayer.configure(&input, &output, activationLayerInfo); +} + +void NeonActivationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonActivationFloat32Workload_Execute"); + m_ActivationLayer.run(); +} + +} //namespace armnn + diff --git a/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.hpp new file mode 100644 index 0000000000..6fa83ea2f6 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonActivationFloat32Workload.hpp @@ -0,0 +1,24 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ +class NeonActivationFloat32Workload : public Float32Workload<ActivationQueueDescriptor> +{ +public: + NeonActivationFloat32Workload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + mutable arm_compute::NEActivationLayer m_ActivationLayer; +}; +} //namespace armnn + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonActivationUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonActivationUint8Workload.cpp new file mode 100644 index 0000000000..27c37e9425 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonActivationUint8Workload.cpp @@ -0,0 +1,42 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonActivationUint8Workload.hpp" +#include "backends/ArmComputeUtils.hpp" +#include "backends/NeonLayerSupport.hpp" + +namespace armnn +{ +NeonActivationUint8Workload::NeonActivationUint8Workload(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Uint8Workload<ActivationQueueDescriptor>(descriptor, info) +{ + + std::string reasonIfUnsupported; + if (!IsNeonActivationUint8Supported(&reasonIfUnsupported, m_Data.m_Parameters)) + { + throw InvalidArgumentException(reasonIfUnsupported); + } + + // Only BoundedReLu is supported (see IsNeonActivationUint8Supported) + arm_compute::ActivationLayerInfo layerInfo(arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, + m_Data.m_Parameters.m_A, + m_Data.m_Parameters.m_B); + + m_Data.ValidateInputsOutputs("NeonActivationUint8Workload", 1, 1); + + arm_compute::ITensor& input = static_cast<NeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = static_cast<NeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_ActivationLayer.configure(&input, &output, layerInfo); +} + +void NeonActivationUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonActivationUint8Workload_Execute"); + + m_ActivationLayer.run(); +} +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonActivationUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonActivationUint8Workload.hpp new file mode 100644 index 0000000000..af655db3d6 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonActivationUint8Workload.hpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonActivationUint8Workload : public Uint8Workload<ActivationQueueDescriptor> +{ +public: + NeonActivationUint8Workload(const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NEActivationLayer m_ActivationLayer; +}; + +} //namespace armnn + + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.cpp new file mode 100644 index 0000000000..d1fb64093d --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.cpp @@ -0,0 +1,32 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonAdditionFloat32Workload.hpp" +#include "backends/CpuTensorHandle.hpp" + +namespace armnn +{ + +NeonAdditionFloat32Workload::NeonAdditionFloat32Workload(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<AdditionQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonAdditionFloat32Workload", 2, 1); + + arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_AddLayer.configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE); +} + +void NeonAdditionFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonAdditionFloat32Workload_Execute"); + m_AddLayer.run(); +} + +} //namespace armnn + diff --git a/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.hpp new file mode 100644 index 0000000000..5b75b502a3 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonAdditionFloat32Workload.hpp @@ -0,0 +1,25 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ +class NeonAdditionFloat32Workload : public Float32Workload<AdditionQueueDescriptor> +{ +public: + NeonAdditionFloat32Workload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NEArithmeticAddition m_AddLayer; +}; + +} //namespace armnn + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonBaseConstantWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonBaseConstantWorkload.hpp new file mode 100644 index 0000000000..247ebfc5dd --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonBaseConstantWorkload.hpp @@ -0,0 +1,72 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/ArmComputeTensorUtils.hpp> +#include <backends/CpuTensorHandle.hpp> +#include <backends/NeonTensorHandle.hpp> +#include <backends/Workload.hpp> + +#include <boost/cast.hpp> + +namespace armnn +{ + +// Base class template providing an implementation of the Constant layer common to all data types +template <armnn::DataType DataFormat> +class NeonBaseConstantWorkload : public TypedWorkload<ConstantQueueDescriptor, DataFormat> +{ +public: + NeonBaseConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info) + : TypedWorkload<ConstantQueueDescriptor, DataFormat>(descriptor, info) + , m_RanOnce(false) + { + } + + virtual void Execute() const override + { + using namespace armcomputetensorutils; + + // The intermediate tensor held by the corresponding layer output handler can be initialised with the + // given data on the first inference, then reused for subsequent inferences. + // The initialisation cannot happen at workload construction time since the ACL kernel for the next layer + // may not have been configured at the time. 
+ if (!m_RanOnce) + { + const ConstantQueueDescriptor& data = this->m_Data; + + BOOST_ASSERT(data.m_LayerOutput != nullptr); + arm_compute::ITensor& output = + boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor(); + + switch (DataFormat) + { + case DataType::Float32: + { + CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<float>(), output); + break; + } + case DataType::QuantisedAsymm8: + { + CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output); + break; + } + default: + { + BOOST_ASSERT_MSG(false, "Unknown data type"); + break; + } + } + + m_RanOnce = true; + } + } + +private: + mutable bool m_RanOnce; +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonBaseMergerWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonBaseMergerWorkload.hpp new file mode 100644 index 0000000000..24640c7adb --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonBaseMergerWorkload.hpp @@ -0,0 +1,25 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/Workload.hpp> + +namespace armnn +{ +// Base class template providing an implementation of the Merger layer common to all data types +template <armnn::DataType DataType> +class NeonBaseMergerWorkload : public TypedWorkload<MergerQueueDescriptor, DataType> +{ +public: + using TypedWorkload<MergerQueueDescriptor, DataType>::TypedWorkload; + + virtual void Execute() const override + { + // With subtensors, merger is a no-op + } +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonBaseSplitterWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonBaseSplitterWorkload.hpp new file mode 100644 index 0000000000..769905b48b --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonBaseSplitterWorkload.hpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/Workload.hpp> + +namespace armnn +{ + +// Base class template providing an implementation of the Splitter layer common to all data types +template <armnn::DataType DataType> +class NeonBaseSplitterWorkload : public TypedWorkload<SplitterQueueDescriptor, DataType> +{ +public: + using TypedWorkload<SplitterQueueDescriptor, DataType>::TypedWorkload; + + virtual void Execute() const override + { + // With subtensors, splitter is a no-op + } +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.cpp new file mode 100644 index 0000000000..f107c8137f --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.cpp @@ -0,0 +1,45 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
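
The constant workload above defers its one-off tensor copy to the first Execute() call because the consuming ACL function may not have been configured when the workload is constructed. The pattern in isolation, as a hypothetical stand-alone illustration that is not part of the change-set:

// Hypothetical illustration of the initialise-on-first-Execute pattern used by
// NeonBaseConstantWorkload above.
class DeferredInitExample
{
public:
    void Execute() const
    {
        if (!m_RanOnce)
        {
            // One-off, order-sensitive setup goes here
            // (for the constant workload: CopyArmComputeITensorData into the output tensor).
            m_RanOnce = true;
        }
        // Per-inference work goes here (a constant layer has none).
    }

private:
    mutable bool m_RanOnce = false; // mutable so the const Execute() can record the first run
};
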
+// + +#include "NeonBatchNormalizationFloat32Workload.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +NeonBatchNormalizationFloat32Workload::NeonBatchNormalizationFloat32Workload( + const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) + : Float32Workload<BatchNormalizationQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonBatchNormalizationFloat32Workload", 1, 1); + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + BuildArmComputeTensor(m_Mean, m_Data.m_Mean->GetTensorInfo()); + BuildArmComputeTensor(m_Variance, m_Data.m_Variance->GetTensorInfo()); + BuildArmComputeTensor(m_Gamma, m_Data.m_Gamma->GetTensorInfo()); + BuildArmComputeTensor(m_Beta, m_Data.m_Beta->GetTensorInfo()); + + m_Layer.configure( + &input, &output, &m_Mean, &m_Variance, &m_Beta, &m_Gamma, m_Data.m_Parameters.m_Eps); + + InitialiseArmComputeTensorData(m_Mean, m_Data.m_Mean->GetConstTensor<float>()); + InitialiseArmComputeTensorData(m_Variance, m_Data.m_Variance->GetConstTensor<float>()); + InitialiseArmComputeTensorData(m_Gamma, m_Data.m_Gamma->GetConstTensor<float>()); + InitialiseArmComputeTensorData(m_Beta, m_Data.m_Beta->GetConstTensor<float>()); +} + +void NeonBatchNormalizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonBatchNormalizationFloat32Workload_Execute"); + m_Layer.run(); +} + +} //namespace armnn + + diff --git a/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.hpp new file mode 100644 index 0000000000..2050d42859 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonBatchNormalizationFloat32Workload.hpp @@ -0,0 +1,32 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonBatchNormalizationFloat32Workload : public Float32Workload<BatchNormalizationQueueDescriptor> +{ +public: + NeonBatchNormalizationFloat32Workload(const BatchNormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NEBatchNormalizationLayer m_Layer; + + arm_compute::Tensor m_Mean; + arm_compute::Tensor m_Variance; + arm_compute::Tensor m_Gamma; + arm_compute::Tensor m_Beta; +}; + +} //namespace armnn + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.cpp new file mode 100644 index 0000000000..8b203fbf3a --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.cpp @@ -0,0 +1,17 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonConstantFloat32Workload.hpp" + +namespace armnn +{ + +void NeonConstantFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonConstantFloat32Workload_Execute"); + NeonBaseConstantWorkload::Execute(); +} + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.hpp new file mode 100644 index 0000000000..4ea4dfe127 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonConstantFloat32Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "NeonBaseConstantWorkload.hpp" + +namespace armnn +{ + +class NeonConstantFloat32Workload : public NeonBaseConstantWorkload<DataType::Float32> +{ +public: + using NeonBaseConstantWorkload<DataType::Float32>::NeonBaseConstantWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonConstantUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConstantUint8Workload.cpp new file mode 100644 index 0000000000..f6dfaeb7a7 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonConstantUint8Workload.cpp @@ -0,0 +1,17 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonConstantUint8Workload.hpp" + +namespace armnn +{ + +void NeonConstantUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonConstantUint8Workload_Execute"); + NeonBaseConstantWorkload::Execute(); +} + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonConstantUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonConstantUint8Workload.hpp new file mode 100644 index 0000000000..729bb35499 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonConstantUint8Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "NeonBaseConstantWorkload.hpp" + +namespace armnn +{ + +class NeonConstantUint8Workload : public NeonBaseConstantWorkload<DataType::QuantisedAsymm8> +{ +public: + using NeonBaseConstantWorkload<DataType::QuantisedAsymm8>::NeonBaseConstantWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp new file mode 100644 index 0000000000..5099965a24 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.cpp @@ -0,0 +1,88 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" +#include "backends/NeonLayerSupport.hpp" + +#include "NeonConvolution2dBaseWorkload.hpp" + +namespace armnn +{ + +template<armnn::DataType dataType> +NeonConvolution2dBaseWorkload<dataType>::NeonConvolution2dBaseWorkload(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<Convolution2dQueueDescriptor, dataType>(descriptor, info) +{ + using arm_compute::NEDirectConvolutionLayer; + using namespace armcomputetensorutils; + + ValidateData(); + + // todo: check tensor shapes match + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + BuildArmComputeTensor(m_KernelTensor, m_Data.m_Weight->GetTensorInfo()); + + arm_compute::Tensor* optionalBiasTensor = nullptr; + if (m_Data.m_Parameters.m_BiasEnabled) + { + BuildArmComputeTensor(m_BiasTensor, m_Data.m_Bias->GetTensorInfo()); + optionalBiasTensor = &m_BiasTensor; + } + + arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX, + m_Data.m_Parameters.m_StrideY, + m_Data.m_Parameters.m_PadLeft, + m_Data.m_Parameters.m_PadRight, + m_Data.m_Parameters.m_PadTop, + m_Data.m_Parameters.m_PadBottom, + arm_compute::DimensionRoundingType::FLOOR); + + const bool preferDirectConvolution = + IsNeonDirectConvolutionPreferred(m_Data.m_Weight->GetTensorInfo(), + m_Data.m_Parameters); + + if (preferDirectConvolution) + { + auto directConvolutionLayer = std::make_unique<arm_compute::NEDirectConvolutionLayer>(); + directConvolutionLayer->configure(&input, + &m_KernelTensor, + optionalBiasTensor, + &output, + padStrideInfo); + m_ConvolutionLayer.reset(directConvolutionLayer.release()); + } + else + { + auto convolutionLayer = std::make_unique<arm_compute::NEConvolutionLayer>(); + convolutionLayer->configure(&input, + &m_KernelTensor, + optionalBiasTensor, + &output, + padStrideInfo); + m_ConvolutionLayer.reset(convolutionLayer.release()); + } + BOOST_ASSERT(m_ConvolutionLayer); + + using Type = ResolveType<dataType>; + + InitialiseArmComputeTensorData(m_KernelTensor, m_Data.m_Weight->template GetConstTensor<Type>()); + if (m_Data.m_Parameters.m_BiasEnabled) + { + InitialiseArmComputeTensorData(m_BiasTensor, m_Data.m_Bias->template GetConstTensor<Type>()); + } +} + +// Generate known implementations for linker +template class NeonConvolution2dBaseWorkload<DataType::Float32>; +template class NeonConvolution2dBaseWorkload<DataType::QuantisedAsymm8>; + +} //namespace armnn + + diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp new file mode 100644 index 0000000000..37740511ba --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dBaseWorkload.hpp @@ -0,0 +1,31 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include <backends/Workload.hpp> +#include <backends/NeonWorkloadUtils.hpp> + +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" +#include "backends/NeonLayerSupport.hpp" + +namespace armnn +{ + +template<armnn::DataType dataType> +class NeonConvolution2dBaseWorkload : public TypedWorkload<Convolution2dQueueDescriptor, dataType> +{ +public: + using TypedWorkload<Convolution2dQueueDescriptor, dataType>::m_Data; + + NeonConvolution2dBaseWorkload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info); + + virtual void ValidateData() const {}; + +protected: + std::unique_ptr<arm_compute::IFunction> m_ConvolutionLayer; + arm_compute::Tensor m_KernelTensor; + arm_compute::Tensor m_BiasTensor; +}; +} //namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp new file mode 100644 index 0000000000..b4650ac011 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.cpp @@ -0,0 +1,36 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonConvolution2dFloat32Workload.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" +#include "backends/NeonLayerSupport.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +NeonConvolution2dFloat32Workload::NeonConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : NeonConvolution2dBaseWorkload(descriptor, info) +{} + + +void NeonConvolution2dFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonConvolution2dFloat32Workload_Execute"); + m_ConvolutionLayer->run(); +} + +void NeonConvolution2dFloat32Workload::ValidateData() const +{ + m_Data.ValidateInputsOutputs("NeonConvolution2dFloat32Workload", 1, 1); +} + + + +} //namespace armnn + + diff --git a/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp new file mode 100644 index 0000000000..f4d95d623f --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonConvolution2dFloat32Workload.hpp @@ -0,0 +1,25 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> +#include "NeonConvolution2dBaseWorkload.hpp" + +namespace armnn +{ +class NeonConvolution2dFloat32Workload : public NeonConvolution2dBaseWorkload<DataType::Float32> +{ +public: + NeonConvolution2dFloat32Workload(const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info); + + void Execute() const override; + void ValidateData() const override; +}; +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.cpp new file mode 100644 index 0000000000..11e31c727a --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.cpp @@ -0,0 +1,91 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonDepthwiseConvolutionFloat32Workload.hpp" +#include "backends/NeonLayerSupport.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + + +namespace armnn +{ +using namespace armcomputetensorutils; + +NeonDepthwiseConvolutionFloat32Workload::NeonDepthwiseConvolutionFloat32Workload( + const DepthwiseConvolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info) +{ + const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo(); + + std::string reasonIfUnsupported; + if (!IsNeonDepthwiseConvolution2dDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters, weightInfo)) + { + throw UnimplementedException(reasonIfUnsupported); + } + + BuildArmComputeTensor(m_KernelTensor, weightInfo); + + arm_compute::Tensor* optionalBias = nullptr; + if (m_Data.m_Parameters.m_BiasEnabled) + { + BuildArmComputeTensor(m_BiasTensor, m_Data.m_Bias->GetTensorInfo()); + optionalBias = &m_BiasTensor; + } + + arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX, + m_Data.m_Parameters.m_StrideY, + m_Data.m_Parameters.m_PadLeft, + m_Data.m_Parameters.m_PadRight, + m_Data.m_Parameters.m_PadTop, + m_Data.m_Parameters.m_PadBottom, + arm_compute::DimensionRoundingType::FLOOR); + + m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionFloat32Workload", 1, 1); + + arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + bool use3x3Optimisation = weightInfo.GetShape()[3] == 3 && weightInfo.GetShape()[2] == 3; + if (use3x3Optimisation) + { + m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer3x3>(); + static_cast<arm_compute::NEDepthwiseConvolutionLayer3x3*>( + m_pDepthwiseConvolutionLayer.get())->configure(&input, + &m_KernelTensor, + optionalBias, + &output, + padStrideInfo); + } + else + { + m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>(); + static_cast<arm_compute::NEDepthwiseConvolutionLayer*>( + m_pDepthwiseConvolutionLayer.get())->configure(&input, + &m_KernelTensor, + optionalBias, + &output, + padStrideInfo); + } + + BOOST_ASSERT(m_pDepthwiseConvolutionLayer); + + InitialiseArmComputeTensorData(m_KernelTensor, m_Data.m_Weight->GetConstTensor<float>()); + + if (optionalBias) + { + InitialiseArmComputeTensorData(*optionalBias, m_Data.m_Bias->GetConstTensor<float>()); + } +} + +void NeonDepthwiseConvolutionFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "NeonDepthwiseConvolutionFloat32Workload_Execute"); + BOOST_ASSERT(m_pDepthwiseConvolutionLayer); + + m_pDepthwiseConvolutionLayer->run(); +} + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.hpp new file mode 100644 index 0000000000..f9e295f568 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionFloat32Workload.hpp @@ -0,0 +1,31 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonDepthwiseConvolutionFloat32Workload : public Float32Workload<DepthwiseConvolution2dQueueDescriptor> +{ +public: + NeonDepthwiseConvolutionFloat32Workload(const DepthwiseConvolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable std::unique_ptr<arm_compute::IFunction> m_pDepthwiseConvolutionLayer; + + arm_compute::Tensor m_KernelTensor; + arm_compute::Tensor m_BiasTensor; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp new file mode 100644 index 0000000000..bd034c4f80 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.cpp @@ -0,0 +1,91 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonDepthwiseConvolutionUint8Workload.hpp" +#include "backends/NeonLayerSupport.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + + +namespace armnn +{ +using namespace armcomputetensorutils; + +NeonDepthwiseConvolutionUint8Workload::NeonDepthwiseConvolutionUint8Workload( + const DepthwiseConvolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Uint8Workload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info) +{ + const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo(); + + std::string reasonIfUnsupported; + if (!IsNeonDepthwiseConvolution2dDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters, weightInfo)) + { + throw UnimplementedException(reasonIfUnsupported); + } + + BuildArmComputeTensor(m_KernelTensor, weightInfo); + + arm_compute::Tensor* optionalBias = nullptr; + if (m_Data.m_Parameters.m_BiasEnabled) + { + BuildArmComputeTensor(m_BiasTensor, m_Data.m_Bias->GetTensorInfo()); + optionalBias = &m_BiasTensor; + } + + arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX, + m_Data.m_Parameters.m_StrideY, + m_Data.m_Parameters.m_PadLeft, + m_Data.m_Parameters.m_PadRight, + m_Data.m_Parameters.m_PadTop, + m_Data.m_Parameters.m_PadBottom, + arm_compute::DimensionRoundingType::FLOOR); + + m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionUint8Workload", 1, 1); + + arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + bool use3x3Optimisation = weightInfo.GetShape()[3] == 3 && weightInfo.GetShape()[2] == 3; + if (use3x3Optimisation) + { + m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer3x3>(); + static_cast<arm_compute::NEDepthwiseConvolutionLayer3x3*>( + m_pDepthwiseConvolutionLayer.get())->configure(&input, + &m_KernelTensor, + optionalBias, + &output, + padStrideInfo); + } + else + { + m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>(); + static_cast<arm_compute::NEDepthwiseConvolutionLayer*>( + m_pDepthwiseConvolutionLayer.get())->configure(&input, + &m_KernelTensor, + optionalBias, + &output, + padStrideInfo); + } + + BOOST_ASSERT(m_pDepthwiseConvolutionLayer); + + InitialiseArmComputeTensorData(m_KernelTensor, m_Data.m_Weight->GetConstTensor<uint8_t>()); + + if (optionalBias) + { + InitialiseArmComputeTensorData(*optionalBias, 
m_Data.m_Bias->GetConstTensor<int32_t>()); + } +} + +void NeonDepthwiseConvolutionUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::GpuAcc, "NeonDepthwiseConvolutionUint8Workload_Execute"); + BOOST_ASSERT(m_pDepthwiseConvolutionLayer); + + m_pDepthwiseConvolutionLayer->run(); +} + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.hpp new file mode 100644 index 0000000000..9cf272e9f5 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonDepthwiseConvolutionUint8Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonDepthwiseConvolutionUint8Workload : public Uint8Workload<DepthwiseConvolution2dQueueDescriptor> +{ +public: + NeonDepthwiseConvolutionUint8Workload(const DepthwiseConvolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable std::unique_ptr<arm_compute::IFunction> m_pDepthwiseConvolutionLayer; + + arm_compute::Tensor m_KernelTensor; + arm_compute::Tensor m_BiasTensor; +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.cpp new file mode 100644 index 0000000000..a5eec5cadb --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.cpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonFloorFloat32Workload.hpp" + +namespace armnn +{ +NeonFloorFloat32Workload::NeonFloorFloat32Workload(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<FloorQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonFloorFloat32Workload", 1, 1); + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_Layer.configure(&input, &output); +} + +void NeonFloorFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonFloorFloat32Workload_Execute"); + m_Layer.run(); +} +} //namespace armnn + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.hpp new file mode 100644 index 0000000000..f876f1e1bb --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonFloorFloat32Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonFloorFloat32Workload : public Float32Workload<FloorQueueDescriptor> +{ +public: + NeonFloorFloat32Workload(const FloorQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NEFloor m_Layer; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp new file mode 100644 index 0000000000..54c4e4333c --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonFullyConnectedFloat32Workload.hpp" +#include "backends/CpuTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + + +namespace armnn +{ +using namespace armcomputetensorutils; + +NeonFullyConnectedFloat32Workload::NeonFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<FullyConnectedQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonFullyConnectedFloat32Workload", 1, 1); + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + BuildArmComputeTensor(m_WeightsTensor, m_Data.m_Weight->GetTensorInfo()); + + arm_compute::Tensor* optionalBiasTensor = nullptr; + if (m_Data.m_Parameters.m_BiasEnabled) + { + BuildArmComputeTensor(m_BiasesTensor, m_Data.m_Bias->GetTensorInfo()); + optionalBiasTensor = &m_BiasesTensor; + } + + // Construct + m_FullyConnectedLayer.configure( + &input, &m_WeightsTensor, optionalBiasTensor, &output, m_Data.m_Parameters.m_TransposeWeightMatrix); + + // Allocate + InitialiseArmComputeTensorData(m_WeightsTensor, m_Data.m_Weight->GetConstTensor<float>()); + + if (optionalBiasTensor) + { + InitialiseArmComputeTensorData(*optionalBiasTensor, m_Data.m_Bias->GetConstTensor<float>()); + } +} + +void NeonFullyConnectedFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonFullyConnectedFloat32Workload_Execute"); + m_FullyConnectedLayer.run(); +} + +} //namespace armnn + + diff --git a/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp new file mode 100644 index 0000000000..f9230f1d93 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonFullyConnectedFloat32Workload.hpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonFullyConnectedFloat32Workload : public Float32Workload<FullyConnectedQueueDescriptor> +{ +public: + NeonFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NEFullyConnectedLayer m_FullyConnectedLayer; + arm_compute::Tensor m_WeightsTensor; + arm_compute::Tensor m_BiasesTensor; +}; + +} //namespace armnn + + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp new file mode 100644 index 0000000000..085f58a219 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.cpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonL2NormalizationFloat32Workload.hpp" +#include "backends/ArmComputeUtils.hpp" + + +namespace armnn +{ + +NeonL2NormalizationFloat32Workload::NeonL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<L2NormalizationQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonL2NormalizationFloat32Workload", 1, 1); + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + m_Layer.configure(&input, &output, CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0])); +} + +void NeonL2NormalizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonL2NormalizationFloat32Workload_Execute"); + m_Layer.run(); +} + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp new file mode 100644 index 0000000000..6cab28366a --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonL2NormalizationFloat32Workload.hpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ +class NeonL2NormalizationFloat32Workload : public Float32Workload<L2NormalizationQueueDescriptor> +{ +public: + NeonL2NormalizationFloat32Workload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + // Purposely not a NEL2Normalize function. See constructor. + mutable arm_compute::NENormalizationLayer m_Layer; +}; +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.cpp new file mode 100644 index 0000000000..7520e8768e --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.cpp @@ -0,0 +1,17 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonMergerFloat32Workload.hpp" + +namespace armnn +{ + +void NeonMergerFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "ClMergerFloat32Workload_Execute"); + NeonBaseMergerWorkload::Execute(); +} + +} // namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.hpp new file mode 100644 index 0000000000..5c889c2af0 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonMergerFloat32Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "NeonBaseMergerWorkload.hpp" + +namespace armnn +{ + +class NeonMergerFloat32Workload : public NeonBaseMergerWorkload<DataType::Float32> +{ +public: + using NeonBaseMergerWorkload<DataType::Float32>::NeonBaseMergerWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonMergerUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonMergerUint8Workload.cpp new file mode 100644 index 0000000000..51578e5bff --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonMergerUint8Workload.cpp @@ -0,0 +1,17 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonMergerUint8Workload.hpp" + +namespace armnn +{ + +void NeonMergerUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "ClMergerUint8Workload_Execute"); + NeonBaseMergerWorkload::Execute(); +} + +} // namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonMergerUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonMergerUint8Workload.hpp new file mode 100644 index 0000000000..fd1e6b72b9 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonMergerUint8Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "NeonBaseMergerWorkload.hpp" + +namespace armnn +{ + +class NeonMergerUint8Workload : public NeonBaseMergerWorkload<DataType::QuantisedAsymm8> +{ +public: + using NeonBaseMergerWorkload<DataType::QuantisedAsymm8>::NeonBaseMergerWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.cpp new file mode 100644 index 0000000000..58ce7b74ba --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.cpp @@ -0,0 +1,41 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonMultiplicationFloat32Workload.hpp" + + +namespace armnn +{ + +NeonMultiplicationFloat32Workload::NeonMultiplicationFloat32Workload(const MultiplicationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<MultiplicationQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonMultiplicationFloat32Workload", 2, 1); + + arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it, + // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be + // ignored for F32 tensors. + m_PixelWiseMultiplication.configure(&input1, + &input2, + &output, + 1.0f, + arm_compute::ConvertPolicy::SATURATE, + arm_compute::RoundingPolicy::TO_ZERO); +} + +void NeonMultiplicationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonMultiplicationFloat32Workload_Execute"); + m_PixelWiseMultiplication.run(); +} + +} //namespace armnn + + diff --git a/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.hpp new file mode 100644 index 0000000000..ed5ead3700 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonMultiplicationFloat32Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonMultiplicationFloat32Workload : public Float32Workload<MultiplicationQueueDescriptor> +{ +public: + NeonMultiplicationFloat32Workload(const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NEPixelWiseMultiplication m_PixelWiseMultiplication; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp new file mode 100644 index 0000000000..739390d5a1 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonNormalizationFloat32Workload.hpp" +#include "backends/NeonLayerSupport.hpp" +#include "backends/ArmComputeUtils.hpp" + +namespace armnn +{ + +NeonNormalizationFloat32Workload::NeonNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<NormalizationQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonNormalizationFloat32Workload", 1, 1); + std::string reasonIfUnsupported; + if (!IsNeonNormalizationDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters)) + { + throw UnimplementedException(reasonIfUnsupported); + } + + // input and output tensors have to have the same dimensionality + if (info.m_InputTensorInfos[0].GetShape()[1] != info.m_OutputTensorInfos[0].GetShape()[1] + || info.m_InputTensorInfos[0].GetShape()[0] != info.m_OutputTensorInfos[0].GetShape()[0] + || info.m_InputTensorInfos[0].GetShape()[3] != info.m_OutputTensorInfos[0].GetShape()[3] + || info.m_InputTensorInfos[0].GetShape()[2] != info.m_OutputTensorInfos[0].GetShape()[2]) + { + throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality."); + } + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + const arm_compute::NormType normType = + ConvertNormalizationAlgorithmChannelToAclNormType(m_Data.m_Parameters.m_NormChannelType); + arm_compute::NormalizationLayerInfo normalizationInfo(normType, + m_Data.m_Parameters.m_NormSize, + m_Data.m_Parameters.m_Alpha, + m_Data.m_Parameters.m_Beta, + m_Data.m_Parameters.m_K, + false); + + m_NormalizationLayer.configure(&input, &output, normalizationInfo); +} + +void NeonNormalizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonNormalizationFloat32Workload_Execute"); + m_NormalizationLayer.run(); +} + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp new file mode 100644 index 0000000000..12a0fa80b2 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloat32Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonNormalizationFloat32Workload : public Float32Workload<NormalizationQueueDescriptor> +{ +public: + NeonNormalizationFloat32Workload(const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NENormalizationLayer m_NormalizationLayer; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonPermuteWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonPermuteWorkload.cpp new file mode 100644 index 0000000000..e0a0457422 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonPermuteWorkload.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonPermuteWorkload.hpp" +#include "backends/NeonTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + +#include <arm_compute/core/Error.h> + +namespace armnn +{ + +arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor) +{ + const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input); + const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output); + const armnn::PermutationVector& mappings = descriptor.m_DimMappings; + + return arm_compute::NEPermute::validate(&aclInputInfo, &aclOutputInfo, + armcomputetensorutils::BuildArmComputePermutationVector(mappings)); +} + +template <armnn::DataType DataType> +NeonPermuteWorkload<DataType>::NeonPermuteWorkload(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) + : TypedWorkload<PermuteQueueDescriptor, DataType>(descriptor, info) +{ + using armcomputetensorutils::BuildArmComputePermutationVector; + + m_Data.ValidateInputsOutputs(GetName(), 1, 1); + + const arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings; + + // Run the layer + m_PermuteFunction.configure(&input, &output, BuildArmComputePermutationVector(mappings)); +} + +template <armnn::DataType DataType> +void NeonPermuteWorkload<DataType>::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, GetName() + "_Execute"); + m_PermuteFunction.run(); +} + +template class NeonPermuteWorkload<DataType::Float32>; +template class NeonPermuteWorkload<DataType::QuantisedAsymm8>; + +} // namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonPermuteWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonPermuteWorkload.hpp new file mode 100644 index 0000000000..56e8719d6c --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonPermuteWorkload.hpp @@ -0,0 +1,42 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +#include <armnn/TypesUtils.hpp> +#include <arm_compute/runtime/NEON/functions/NEPermute.h> + +#include <string> + +namespace armnn +{ +arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo& input, const TensorInfo& output, + const PermuteDescriptor& descriptor); + +template <armnn::DataType DataType> +class NeonPermuteWorkload : public TypedWorkload<PermuteQueueDescriptor, DataType> +{ +public: + static const std::string& GetName() + { + static const std::string name = std::string("NeonPermute") + GetDataTypeName(DataType) + "Workload"; + return name; + } + + NeonPermuteWorkload(const PermuteQueueDescriptor& descriptor, const WorkloadInfo& info); + void Execute() const override; + +private: + using TypedWorkload<PermuteQueueDescriptor, DataType>::m_Data; + mutable arm_compute::NEPermute m_PermuteFunction; +}; + +using NeonPermuteFloat32Workload = NeonPermuteWorkload<DataType::Float32>; +using NeonPermuteUint8Workload = NeonPermuteWorkload<DataType::QuantisedAsymm8>; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.cpp new file mode 100644 index 0000000000..6d6a492155 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.cpp @@ -0,0 +1,47 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonPooling2dBaseWorkload.hpp" +#include "backends/NeonLayerSupport.hpp" +#include "backends/NeonTensorHandle.hpp" +#include "backends/ArmComputeUtils.hpp" +#include "backends/ArmComputeTensorUtils.hpp" + +namespace armnn +{ +using namespace armcomputetensorutils; + +arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor) +{ + const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input); + const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output); + + arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor); + + return arm_compute::NEPoolingLayer::validate(&aclInputInfo, &aclOutputInfo, layerInfo); +} + +template <armnn::DataType dataType> +NeonPooling2dBaseWorkload<dataType>::NeonPooling2dBaseWorkload( + const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info, const std::string& name) + : TypedWorkload<Pooling2dQueueDescriptor, dataType>(descriptor, info) +{ + m_Data.ValidateInputsOutputs(name, 1, 1); + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(m_Data.m_Parameters); + + m_PoolingLayer.configure(&input, &output, layerInfo); +} + +template class NeonPooling2dBaseWorkload<DataType::Float32>; +template class NeonPooling2dBaseWorkload<DataType::QuantisedAsymm8>; + +} //namespace armnn + diff --git a/src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.hpp new file mode 100644 index 0000000000..9461982f86 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.hpp @@ -0,0 +1,37 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor); + +// Base class template providing an implementation of the Pooling2d layer common to all data types +template <armnn::DataType dataType> +class NeonPooling2dBaseWorkload : public TypedWorkload<Pooling2dQueueDescriptor, dataType> +{ +public: + using TypedWorkload<Pooling2dQueueDescriptor, dataType>::m_Data; + + NeonPooling2dBaseWorkload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info, + const std::string& name); + +protected: + mutable arm_compute::NEPoolingLayer m_PoolingLayer; +}; + + +} //namespace armnn + + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.cpp new file mode 100644 index 0000000000..ba2aa20924 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.cpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonPooling2dFloat32Workload.hpp" + + + +namespace armnn +{ + +NeonPooling2dFloat32Workload::NeonPooling2dFloat32Workload(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : NeonPooling2dBaseWorkload<armnn::DataType::Float32>(descriptor, info, "NeonPooling2dFloat32Workload") +{ +} + +void NeonPooling2dFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonPooling2dFloat32Workload_Execute"); + m_PoolingLayer.run(); +} + +} //namespace armnn + diff --git a/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.hpp new file mode 100644 index 0000000000..6cfc9cc96f --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonPooling2dFloat32Workload.hpp @@ -0,0 +1,24 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> +#include "NeonPooling2dBaseWorkload.hpp" + +namespace armnn +{ + +class NeonPooling2dFloat32Workload : public NeonPooling2dBaseWorkload<armnn::DataType::Float32> +{ +public: + NeonPooling2dFloat32Workload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; +}; + +} //namespace armnn + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonPooling2dUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonPooling2dUint8Workload.cpp new file mode 100644 index 0000000000..0778794081 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonPooling2dUint8Workload.cpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonPooling2dUint8Workload.hpp" + + + +namespace armnn +{ + +NeonPooling2dUint8Workload::NeonPooling2dUint8Workload(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) + : NeonPooling2dBaseWorkload<armnn::DataType::QuantisedAsymm8>(descriptor, info, "NeonPooling2dUint8Workload") +{ +} + +void NeonPooling2dUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonPooling2dUint8Workload_Execute"); + m_PoolingLayer.run(); +} + +} //namespace armnn + diff --git a/src/armnn/backends/NeonWorkloads/NeonPooling2dUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonPooling2dUint8Workload.hpp new file mode 100644 index 0000000000..fa8182125b --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonPooling2dUint8Workload.hpp @@ -0,0 +1,25 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <armnn/Types.hpp> +#include "NeonPooling2dBaseWorkload.hpp" + +namespace armnn +{ + +class NeonPooling2dUint8Workload : public NeonPooling2dBaseWorkload<armnn::DataType::QuantisedAsymm8> +{ +public: + NeonPooling2dUint8Workload(const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.cpp new file mode 100644 index 0000000000..317d16f6bd --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.cpp @@ -0,0 +1,32 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonReshapeFloat32Workload.hpp" + + + +namespace armnn +{ + +NeonReshapeFloat32Workload::NeonReshapeFloat32Workload(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<ReshapeQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonReshapeFloat32Workload", 1, 1); + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_Layer.configure(&input, &output); +} + +void NeonReshapeFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonReshapeFloat32Workload_Execute"); + m_Layer.run(); +} + +} //namespace armnn + diff --git a/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.hpp new file mode 100644 index 0000000000..27f4aea9e7 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonReshapeFloat32Workload.hpp @@ -0,0 +1,29 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonReshapeFloat32Workload : public Float32Workload<ReshapeQueueDescriptor> +{ +public: + NeonReshapeFloat32Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info); + + virtual void Execute() const override; + +private: + mutable arm_compute::NEReshapeLayer m_Layer; +}; + +} //namespace armnn + + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonReshapeUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonReshapeUint8Workload.cpp new file mode 100644 index 0000000000..06f57c1e0f --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonReshapeUint8Workload.cpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonReshapeUint8Workload.hpp" + + + + +namespace armnn +{ +NeonReshapeUint8Workload::NeonReshapeUint8Workload(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Uint8Workload<ReshapeQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonReshapeUint8Workload", 1, 1); + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_Layer.configure(&input, &output); +} + +void NeonReshapeUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonReshapeUint8Workload_Execute"); + m_Layer.run(); +} +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonReshapeUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonReshapeUint8Workload.hpp new file mode 100644 index 0000000000..66b7d914b1 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonReshapeUint8Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonReshapeUint8Workload : public Uint8Workload<ReshapeQueueDescriptor> +{ +public: + NeonReshapeUint8Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NEReshapeLayer m_Layer; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp new file mode 100644 index 0000000000..229562ece2 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.cpp @@ -0,0 +1,31 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonSoftmaxFloat32Workload.hpp" + +namespace armnn +{ +NeonSoftmaxFloat32Workload::NeonSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) + : Float32Workload<SoftmaxQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonSoftmaxFloat32Workload", 1, 1); + + // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_SoftmaxLayer.configure(&input, &output, m_Data.m_Parameters.m_Beta); +} + +void NeonSoftmaxFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonSoftmaxFloat32Workload_Execute"); + m_SoftmaxLayer.run(); +} +} //namespace armnn + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp new file mode 100644 index 0000000000..c466a0f9c6 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxFloat32Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonSoftmaxFloat32Workload : public Float32Workload<SoftmaxQueueDescriptor> +{ +public: + NeonSoftmaxFloat32Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NESoftmaxLayer m_SoftmaxLayer; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp new file mode 100644 index 0000000000..a66b0343ff --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp @@ -0,0 +1,38 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonSoftmaxUint8Workload.hpp" + + + +namespace armnn +{ +NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info) + : Uint8Workload<SoftmaxQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1); + + arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + const auto outputQuantization = output.info()->quantization_info(); + + if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0)) + { + throw InvalidArgumentException( + "Invalid quantization for output. 
Only scale = 1.0f / 256.0f and offset = 0 supported"); + } + + m_SoftmaxLayer.configure(&input, &output, descriptor.m_Parameters.m_Beta); +} + +void NeonSoftmaxUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "ClSoftmaxUint8Workload_Execute"); + + m_SoftmaxLayer.run(); +} +} //namespace armnn + diff --git a/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp new file mode 100644 index 0000000000..bccd82a850 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <backends/NeonWorkloadUtils.hpp> + +namespace armnn +{ + +class NeonSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor> +{ +public: + NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info); + virtual void Execute() const override; + +private: + mutable arm_compute::NESoftmaxLayer m_SoftmaxLayer; +}; + +} //namespace armnn + + + + diff --git a/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.cpp new file mode 100644 index 0000000000..13701d2ed3 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.cpp @@ -0,0 +1,17 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "NeonSplitterFloat32Workload.hpp" + +namespace armnn +{ + +void NeonSplitterFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonSplitterFloat32Workload_Execute"); + NeonBaseSplitterWorkload::Execute(); +} + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.hpp new file mode 100644 index 0000000000..432f5de4eb --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonSplitterFloat32Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "NeonBaseSplitterWorkload.hpp" + +namespace armnn +{ + +class NeonSplitterFloat32Workload : public NeonBaseSplitterWorkload<DataType::Float32> +{ +public: + using NeonBaseSplitterWorkload<DataType::Float32>::NeonBaseSplitterWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.cpp new file mode 100644 index 0000000000..90d24d3ffd --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.cpp @@ -0,0 +1,17 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "NeonSplitterUint8Workload.hpp" + +namespace armnn +{ + +void NeonSplitterUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuAcc, "NeonSplitterUint8Workload_Execute"); + NeonBaseSplitterWorkload::Execute(); +} + +} //namespace armnn diff --git a/src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.hpp b/src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.hpp new file mode 100644 index 0000000000..1c97c74e02 --- /dev/null +++ b/src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "NeonBaseSplitterWorkload.hpp" + +namespace armnn +{ + +class NeonSplitterUint8Workload : public NeonBaseSplitterWorkload<DataType::QuantisedAsymm8> +{ +public: + using NeonBaseSplitterWorkload<DataType::QuantisedAsymm8>::NeonBaseSplitterWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/OutputHandler.cpp b/src/armnn/backends/OutputHandler.cpp new file mode 100644 index 0000000000..54afe565a9 --- /dev/null +++ b/src/armnn/backends/OutputHandler.cpp @@ -0,0 +1,41 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "OutputHandler.hpp" + +#include <boost/assert.hpp> +#include <boost/log/trivial.hpp> + +#include "backends/WorkloadFactory.hpp" +#include "backends/WorkloadDataCollector.hpp" +#include "backends/ITensorHandle.hpp" + +namespace armnn +{ + +void OutputHandler::SetTensorInfo(const TensorInfo& tensorInfo) +{ + m_TensorInfo = tensorInfo; + m_bTensorInfoSet = true; +} + +void OutputHandler::CreateTensorHandles(const IWorkloadFactory& factory) +{ + m_TensorHandle = factory.CreateTensorHandle(m_TensorInfo); +} + +void OutputHandler::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const +{ + dataCollector.Push(m_TensorHandle.get(), m_TensorInfo); +} + +void OutputHandler::AllocateTensors() +{ + if (m_TensorHandle) + { + m_TensorHandle->Allocate(); + } +} + +} // namespace armnn diff --git a/src/armnn/backends/OutputHandler.hpp b/src/armnn/backends/OutputHandler.hpp new file mode 100644 index 0000000000..9cc87c6095 --- /dev/null +++ b/src/armnn/backends/OutputHandler.hpp @@ -0,0 +1,66 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "backends/WorkloadDataFwd.hpp" + +#include <string> +#include <vector> + +#include <memory> +#include <set> + +#include <boost/assert.hpp> + +#include "armnn/INetwork.hpp" +#include "armnn/Types.hpp" +#include "armnn/Descriptors.hpp" +#include "armnn/Tensor.hpp" +#include "ITensorHandle.hpp" + +namespace armnn +{ + +class ITensorHandle; +class IWorkloadFactory; +class OutputSlot; +class WorkloadDataCollector; + +class OutputHandler +{ +public: + /// @brief Sets the TensorInfo used by this output handler. + /// @param tensorInfo TensorInfo for the output. + void SetTensorInfo(const TensorInfo& tensorInfo); + + /// @brief Create tensor handlers used by the intermediate tensors. Does not allocate memory. + /// @param factory Factory to be used for handler creation. + void CreateTensorHandles(const IWorkloadFactory& factory); + + /// @brief Get the matching TensorInfo for the output + /// @return Reference to the output TensorInfo. 
+ const TensorInfo& GetTensorInfo() const { return m_TensorInfo; } + + /// @brief Get the allocated tensor memory. + /// @return Pointer to the tensor memory + ITensorHandle* GetData() const { return m_TensorHandle.get(); } + + /// Fill the outputs for a given queue descriptor + void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const; + + void SetData(std::unique_ptr<ITensorHandle> data) { m_TensorHandle = std::move(data); } + + /// @brief Allocate memory for all the tensors assigned to the handlers + void AllocateTensors(); + + /// @brief Returns true if SetTensorInfo() has been called at least once on this. + bool IsTensorInfoSet() const { return m_bTensorInfoSet; } +private: + std::unique_ptr<ITensorHandle> m_TensorHandle; + TensorInfo m_TensorInfo; + bool m_bTensorInfoSet = false; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefLayerSupport.cpp b/src/armnn/backends/RefLayerSupport.cpp new file mode 100644 index 0000000000..964c18e8ea --- /dev/null +++ b/src/armnn/backends/RefLayerSupport.cpp @@ -0,0 +1,262 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "LayerSupportCommon.hpp" +#include "RefLayerSupport.hpp" +#include <armnn/Descriptors.hpp> +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> + +#include <boost/core/ignore_unused.hpp> + +#include "InternalTypes.hpp" + +using namespace boost; + +namespace armnn +{ + +template<typename Float32Func, typename Uint8Func, typename ... Params> +bool IsSupportedForDataTypeRef(std::string* reasonIfUnsupported, + DataType dataType, + Float32Func floatFuncPtr, + Uint8Func uint8FuncPtr, + Params&&... params) +{ + return IsSupportedForDataTypeGeneric(reasonIfUnsupported, + dataType, + floatFuncPtr, + uint8FuncPtr, + std::forward<Params>(params)...); +} + +bool IsActivationSupportedRef(const TensorInfo& input, + const ActivationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsAdditionSupportedRef(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + ignore_unused(input1); + ignore_unused(output); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input0.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsBatchNormalizationSupportedRef(const TensorInfo& input, + const BatchNormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsConstantSupportedRef(const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeRef(reasonIfUnsupported, + output.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsConvolution2dSupportedRef(const TensorInfo& input, + const Convolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + ignore_unused(weights); + return 
IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsFullyConnectedSupportedRef(const TensorInfo& input, + const FullyConnectedDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsInputSupportedRef(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsL2NormalizationSupportedRef(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsMergerSupportedRef(const std::vector<const TensorInfo*> inputs, + const OriginsDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + inputs[0]->GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsMultiplicationSupportedRef(const TensorInfo& input0, + const TensorInfo& input1, + std::string* reasonIfUnsupported) +{ + ignore_unused(input1); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input0.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsNormalizationSupportedRef(const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsOutputSupportedRef(const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeRef(reasonIfUnsupported, + output.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsPermuteSupportedRef(const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsPooling2dSupportedRef(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsResizeBilinearSupportedRef(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsSoftmaxSupportedRef(const TensorInfo& input, + const SoftmaxDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsSplitterSupportedRef(const TensorInfo& input, + const ViewsDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsFakeQuantizationSupportedRef(const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + ignore_unused(descriptor); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + 
&TrueFunc<>, + &FalseFuncU8<>); +} + +bool IsReshapeSupportedRef(const TensorInfo& input, + std::string* reasonIfUnsupported) +{ + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); +} + +bool IsFloorSupportedRef(const TensorInfo& input, + const TensorInfo& output, + std::string* reasonIfUnsupported) +{ + ignore_unused(output); + return IsSupportedForDataTypeRef(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); +} + +} diff --git a/src/armnn/backends/RefLayerSupport.hpp b/src/armnn/backends/RefLayerSupport.hpp new file mode 100644 index 0000000000..4a329aef34 --- /dev/null +++ b/src/armnn/backends/RefLayerSupport.hpp @@ -0,0 +1,98 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/DescriptorsFwd.hpp> +#include <armnn/Types.hpp> +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +bool IsActivationSupportedRef(const TensorInfo& input, + const ActivationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsAdditionSupportedRef(const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +bool IsBatchNormalizationSupportedRef(const TensorInfo& input, + const BatchNormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsConstantSupportedRef(const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +bool IsConvolution2dSupportedRef(const TensorInfo& input, + const Convolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported = nullptr); + +bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input, + const DepthwiseConvolution2dDescriptor& descriptor, + const TensorInfo& weights, + std::string* reasonIfUnsupported = nullptr); + +bool IsFullyConnectedSupportedRef(const TensorInfo& input, + const FullyConnectedDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsInputSupportedRef(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsL2NormalizationSupportedRef(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsMergerSupportedRef(const std::vector<const TensorInfo*> inputs, + const OriginsDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsMultiplicationSupportedRef(const TensorInfo& input0, + const TensorInfo& input1, + std::string* reasonIfUnsupported = nullptr); + +bool IsNormalizationSupportedRef(const TensorInfo& input, + const TensorInfo& output, + const NormalizationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsOutputSupportedRef(const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +bool IsPermuteSupportedRef(const TensorInfo& input, + const TensorInfo& output, + const PermuteDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsPooling2dSupportedRef(const TensorInfo& input, + const TensorInfo& output, + const Pooling2dDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsResizeBilinearSupportedRef(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsSoftmaxSupportedRef(const TensorInfo& input, + const SoftmaxDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsSplitterSupportedRef(const TensorInfo& input, + const 
ViewsDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsFakeQuantizationSupportedRef(const TensorInfo& input, + const FakeQuantizationDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + +bool IsReshapeSupportedRef(const TensorInfo& input, + std::string* reasonIfUnsupported = nullptr); + +bool IsFloorSupportedRef(const TensorInfo& input, + const TensorInfo& output, + std::string* reasonIfUnsupported = nullptr); + +} diff --git a/src/armnn/backends/RefWorkloadFactory.cpp b/src/armnn/backends/RefWorkloadFactory.cpp new file mode 100644 index 0000000000..46502d8142 --- /dev/null +++ b/src/armnn/backends/RefWorkloadFactory.cpp @@ -0,0 +1,231 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "CpuTensorHandle.hpp" +#include "RefWorkloadFactory.hpp" +#include "RefWorkloads.hpp" +#include "Layer.hpp" +#include "Layers.hpp" +#include "MemCopyWorkload.hpp" +#include "MakeWorkloadHelper.hpp" + +#include <boost/log/trivial.hpp> + +namespace armnn +{ + +template <typename F32Workload, typename U8Workload, typename QueueDescriptorType> +std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor, + const WorkloadInfo& info) const +{ + if (!IsOperationQueueDescriptor(descriptor) || m_OperationWorkloadsAllowed) + { + return armnn::MakeWorkload<F32Workload, U8Workload>(descriptor, info); + } + else + { + return std::unique_ptr<IWorkload>(); + } +} + +RefWorkloadFactory::RefWorkloadFactory(bool operationWorkloadsAllowed) + : m_OperationWorkloadsAllowed(operationWorkloadsAllowed) +{ +} + +bool RefWorkloadFactory::IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported) +{ + return IWorkloadFactory::IsLayerSupported(Compute::CpuRef, layer, dataType, outReasonIfUnsupported); +} + +std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const +{ + return std::make_unique<ScopedCpuTensorHandle>(tensorInfo); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + if (info.m_InputTensorInfos.empty() ) + { + throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length"); + } + if (info.m_OutputTensorInfos.empty()) + { + throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length"); + } + + if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes()) + { + throw InvalidArgumentException("RefWorkloadFactory::CreateInput: data input and output differ in byte count."); + } + + return MakeWorkload<CopyFromCpuToCpuFloat32Workload, CopyFromCpuToCpuUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + if (info.m_InputTensorInfos.empty() ) + { + throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length"); + } + if (info.m_OutputTensorInfos.empty()) + { + throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length"); + } + if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes()) + { + throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output differ in byte count."); + } + + return MakeWorkload<CopyFromCpuToCpuFloat32Workload, 
CopyFromCpuToCpuUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefActivationFloat32Workload, RefActivationUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefSoftmaxFloat32Workload, RefSoftmaxUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefSplitterFloat32Workload, RefSplitterUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefMergerFloat32Workload, RefMergerUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateFullyConnected( + const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<RefFullyConnectedFloat32Workload, RefFullyConnectedUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefPermuteFloat32Workload, RefPermuteUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefPooling2dFloat32Workload, RefPooling2dUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateConvolution2d( + const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<RefConvolution2dFloat32Workload, RefConvolution2dUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthwiseConvolution2d( + const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<RefDepthwiseConvolution2dFloat32Workload, + RefDepthwiseConvolution2dUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateNormalization( + const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<RefNormalizationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefAdditionFloat32Workload, RefAdditionUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMultiplication( + const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<RefMultiplicationFloat32Workload, RefMultiplicationUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateBatchNormalization( + const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const +{ + return MakeWorkload<RefBatchNormalizationFloat32Workload, RefBatchNormalizationUint8Workload>(descriptor, info); +} + +std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& 
descriptor, + const WorkloadInfo& info) const +{ + if (descriptor.m_Inputs.empty()) + { + throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor."); + } + // Create a workload that will copy tensor data from the inputs, which can have a number of different formats, + // to CPU tensors. + switch (descriptor.m_Inputs[0]->GetType()) + { +#if ARMCOMPUTECL_ENABLED + case ITensorHandle::CL: + { + return MakeWorkload<CopyFromClToCpuFloat32Workload, CopyFromClToCpuUint8Workload>(descriptor, info); + } +#endif +#if ARMCOMPUTENEON_ENABLED + case ITensorHandle::Neon: + { + return MakeWorkload<CopyFromNeonToCpuFloat32Workload, CopyFromNeonToCpuUint8Workload>(descriptor, info); + } +#endif + default: + throw InvalidArgumentException("RefWorkloadFactory: Destination type not supported for MemCopy Workload."); + return nullptr; + } +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefResizeBilinearFloat32Workload, RefResizeBilinearUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization( + const FakeQuantizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefFakeQuantizationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefL2NormalizationFloat32Workload, NullWorkload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefConstantFloat32Workload, RefConstantUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefReshapeFloat32Workload, RefReshapeUint8Workload>(descriptor, info); +} + +std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return MakeWorkload<RefFloorFloat32Workload, NullWorkload>(descriptor, info); +} + +} // namespace armnn diff --git a/src/armnn/backends/RefWorkloadFactory.hpp b/src/armnn/backends/RefWorkloadFactory.hpp new file mode 100644 index 0000000000..3fab490ad8 --- /dev/null +++ b/src/armnn/backends/RefWorkloadFactory.hpp @@ -0,0 +1,124 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include "WorkloadFactory.hpp" +#include "OutputHandler.hpp" + +#include <boost/core/ignore_unused.hpp> + +namespace armnn +{ + +template <typename QueueDescriptorType> +constexpr bool IsOperationQueueDescriptor(const QueueDescriptorType&) { return true; } + +template <> +constexpr bool IsOperationQueueDescriptor(const MemCopyQueueDescriptor&) { return false; } + +template <> +constexpr bool IsOperationQueueDescriptor(const ConstantQueueDescriptor&) { return false; } + +template <> +constexpr bool IsOperationQueueDescriptor(const PermuteQueueDescriptor&) { return false; } + +// Reference workload factory +class RefWorkloadFactory : public IWorkloadFactory +{ +public: + explicit RefWorkloadFactory(bool operationWorkloadsAllowed = true); + virtual ~RefWorkloadFactory() { }; + + virtual Compute GetCompute() const override { return Compute::CpuRef; } + + static bool IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported); + + virtual bool SupportsSubTensors() const override { return false; } + + virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent, + TensorShape const& subTensorShape, + unsigned int const* subTensorOrigin) const override + { + boost::ignore_unused(parent, subTensorShape, subTensorOrigin); + return nullptr; + }; + + virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override; + + virtual std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d( + const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const 
BatchNormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + + virtual std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + +private: + + template <typename F32Workload, typename U8Workload, typename QueueDescriptorType> + std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor, const WorkloadInfo& info) const; + + const bool m_OperationWorkloadsAllowed; +}; + +} // namespace armnn diff --git a/src/armnn/backends/RefWorkloads.hpp b/src/armnn/backends/RefWorkloads.hpp new file mode 100644 index 0000000000..ed4fa840da --- /dev/null +++ b/src/armnn/backends/RefWorkloads.hpp @@ -0,0 +1,54 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/RefWorkloads/RefConstantUint8Workload.hpp" +#include "backends/RefWorkloads/Addition.hpp" +#include "backends/RefWorkloads/ConvImpl.hpp" +#include "backends/RefWorkloads/RefMultiplicationUint8Workload.hpp" +#include "backends/RefWorkloads/RefBaseConstantWorkload.hpp" +#include "backends/RefWorkloads/RefConvolution2dUint8Workload.hpp" +#include "backends/RefWorkloads/RefSplitterUint8Workload.hpp" +#include "backends/RefWorkloads/RefResizeBilinearUint8Workload.hpp" +#include "backends/RefWorkloads/RefL2NormalizationFloat32Workload.hpp" +#include "backends/RefWorkloads/Multiplication.hpp" +#include "backends/RefWorkloads/RefActivationUint8Workload.hpp" +#include "backends/RefWorkloads/RefPooling2dFloat32Workload.hpp" +#include "backends/RefWorkloads/RefWorkloadUtils.hpp" +#include "backends/RefWorkloads/RefMergerUint8Workload.hpp" +#include "backends/RefWorkloads/RefFullyConnectedFloat32Workload.hpp" +#include "backends/RefWorkloads/Softmax.hpp" +#include "backends/RefWorkloads/RefMergerFloat32Workload.hpp" +#include "backends/RefWorkloads/TensorBufferArrayView.hpp" +#include "backends/RefWorkloads/RefBatchNormalizationFloat32Workload.hpp" +#include "backends/RefWorkloads/Splitter.hpp" +#include "backends/RefWorkloads/RefFullyConnectedUint8Workload.hpp" +#include "backends/RefWorkloads/RefReshapeFloat32Workload.hpp" +#include "backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.hpp" +#include "backends/RefWorkloads/FullyConnected.hpp" +#include "backends/RefWorkloads/RefFloorFloat32Workload.hpp" +#include "backends/RefWorkloads/RefSoftmaxFloat32Workload.hpp" +#include "backends/RefWorkloads/RefSoftmaxUint8Workload.hpp" +#include "backends/RefWorkloads/RefReshapeUint8Workload.hpp" +#include 
"backends/RefWorkloads/RefResizeBilinearFloat32Workload.hpp" +#include "backends/RefWorkloads/RefAdditionUint8Workload.hpp" +#include "backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp" +#include "backends/RefWorkloads/RefBatchNormalizationUint8Workload.hpp" +#include "backends/RefWorkloads/ResizeBilinear.hpp" +#include "backends/RefWorkloads/RefNormalizationFloat32Workload.hpp" +#include "backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.hpp" +#include "backends/RefWorkloads/RefPooling2dUint8Workload.hpp" +#include "backends/RefWorkloads/BatchNormImpl.hpp" +#include "backends/RefWorkloads/Activation.hpp" +#include "backends/RefWorkloads/Merger.hpp" +#include "backends/RefWorkloads/RefSplitterFloat32Workload.hpp" +#include "backends/RefWorkloads/RefConstantFloat32Workload.hpp" +#include "backends/RefWorkloads/RefActivationFloat32Workload.hpp" +#include "backends/RefWorkloads/RefConvolution2dFloat32Workload.hpp" +#include "backends/RefWorkloads/RefAdditionFloat32Workload.hpp" +#include "backends/RefWorkloads/Pooling2d.hpp" +#include "backends/RefWorkloads/RefFakeQuantizationFloat32Workload.hpp" +#include "backends/RefWorkloads/RefPermuteWorkload.hpp" diff --git a/src/armnn/backends/RefWorkloads/Activation.cpp b/src/armnn/backends/RefWorkloads/Activation.cpp new file mode 100644 index 0000000000..ede283cbf9 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Activation.cpp @@ -0,0 +1,91 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "Activation.hpp" + +#include <boost/log/trivial.hpp> + +#include <cmath> + +namespace armnn +{ + +void Activation(const float* in, + float* out, + const TensorInfo& tensorInfo, + ActivationFunction function, + float a, + float b) +{ + for (size_t i = 0; i<tensorInfo.GetNumElements(); i++) + { + float input = in[i]; + float output; + + // compute the result of the activation function + switch (function) + { + case ActivationFunction::Linear: + { + output = a * input + b; + break; + } + case ActivationFunction::Sigmoid: + { + output = 1.f / (1.f + expf(-input)); + break; + } + case ActivationFunction::ReLu: + { + output = std::max(0.f, input); + break; + } + case ActivationFunction::BoundedReLu: + { + output = std::min(a, std::max(b, input)); + break; + } + case ActivationFunction::SoftReLu: + { + output = logf(1.0f + expf(input)); + break; + } + case ActivationFunction::LeakyReLu: + { + output = input > 0.0f ? input : (input * a); + break; + } + case ActivationFunction::Abs: + { + output = input < 0 ? -input : input; + break; + } + case ActivationFunction::Sqrt: + { + output = sqrtf(input); + break; + } + case ActivationFunction::Square: + { + output = input * input; + break; + } + case ActivationFunction::TanH: + { + output = a * tanhf(b * input); + break; + } + default: + { + BOOST_LOG_TRIVIAL(error) << "Unsupported activation function"; + return; + } + } + + out[i] = output; + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Activation.hpp b/src/armnn/backends/RefWorkloads/Activation.hpp new file mode 100644 index 0000000000..874441c862 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Activation.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include <armnn/Tensor.hpp> +#include <armnn/Types.hpp> + +namespace armnn +{ + +/// Performs the ActivationFunction elementwise on the inputs to give the outputs +void Activation(const float* in, + float* out, + const TensorInfo& tensorInfo, + ActivationFunction function, + float a, + float b); + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Addition.cpp b/src/armnn/backends/RefWorkloads/Addition.cpp new file mode 100644 index 0000000000..c26f82ecc2 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Addition.cpp @@ -0,0 +1,44 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "Addition.hpp" +#include "Broadcast.hpp" + +#include <functional> + +namespace armnn +{ + +namespace +{ + +void ElementwiseAddition(unsigned int numElements, const float* inData0, const float* inData1, float* outData) +{ + for (unsigned int i = 0; i < numElements; ++i) + { + outData[i] = inData0[i] + inData1[i]; + } +} + +} // namespace + +void Addition(const TensorShape& inShape0, + const TensorShape& inShape1, + const TensorShape& outShape, + const float* inData0, + const float* inData1, + float* outData) +{ + if (inShape0 == inShape1) + { + ElementwiseAddition(inShape0.GetNumElements(), inData0, inData1, outData); + } + else + { + BroadcastLoop(inShape0, inShape1, outShape).Unroll(std::plus<float>(), 0, inData0, inData1, outData); + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Addition.hpp b/src/armnn/backends/RefWorkloads/Addition.hpp new file mode 100644 index 0000000000..e62d63ec14 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Addition.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +void Addition(const TensorShape& inShape0, + const TensorShape& inShape1, + const TensorShape& outShape, + const float* inData0, + const float* inData1, + float* outData); + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/BatchNormImpl.hpp b/src/armnn/backends/RefWorkloads/BatchNormImpl.hpp new file mode 100644 index 0000000000..f40a277d17 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/BatchNormImpl.hpp @@ -0,0 +1,56 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "RefWorkloadUtils.hpp" + +#include <armnn/Tensor.hpp> + +#include <cmath> + +namespace armnn +{ + +template<typename NormData> +static void BatchNormImpl(NormData data, + const float* varIn, + const float* meanIn, + const float* gammaIn, + const float* betaIn, + float * outputData, + const float * inputData) +{ + const TensorInfo& inputInfo0 = GetTensorInfo(data.m_Inputs[0]); + for (unsigned int c = 0; c < inputInfo0.GetShape()[1]; c++) + { + float var = varIn[c]; + float mean = meanIn[c]; + float gamma = gammaIn[c]; + float beta = betaIn[c]; + + float mult = gamma / sqrtf(var + data.m_Parameters.m_Eps); + float add = beta - mult * mean; + + for (unsigned int n = 0; n < inputInfo0.GetShape()[0]; n++) + { + for (unsigned int j = 0; j < inputInfo0.GetShape()[2]; j++) + { + for (unsigned int i = 0; i < inputInfo0.GetShape()[3]; i++) + { + unsigned int index = i + + j*inputInfo0.GetShape()[3] + + c*inputInfo0.GetShape()[3] * inputInfo0.GetShape()[2] + + n*inputInfo0.GetShape()[3] * inputInfo0.GetShape()[2] + * inputInfo0.GetShape()[1]; + + outputData[index] = mult * inputData[index] + add; + } + } + } + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Broadcast.cpp b/src/armnn/backends/RefWorkloads/Broadcast.cpp new file mode 100644 index 0000000000..90ccb48616 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Broadcast.cpp @@ -0,0 +1,33 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "Broadcast.hpp" + +namespace armnn +{ + +BroadcastLoop::BroadcastLoop(const TensorShape& inShape0, const TensorShape& inShape1, const TensorShape& outShape) +: m_DimData(outShape.GetNumDimensions()) +{ + const unsigned int numDims = GetNumDimensions(); + + unsigned int sIn0 = 1; + unsigned int sIn1 = 1; + unsigned int sOut = 1; + + for (unsigned int j = numDims - 1, k = 0; k < numDims ; k++, j--) + { + m_DimData[j].m_DimSize = outShape[j]; + m_DimData[j].m_Stride1 = (inShape0[j] > 1) ? sIn0 : 0; + m_DimData[j].m_Stride2 = (inShape1[j] > 1) ? sIn1 : 0; + m_DimData[j].m_StrideOut = sOut; + + sIn0 *= inShape0[j]; + sIn1 *= inShape1[j]; + sOut *= outShape[j]; + } +} + +} // namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Broadcast.hpp b/src/armnn/backends/RefWorkloads/Broadcast.hpp new file mode 100644 index 0000000000..b65b57f7a1 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Broadcast.hpp @@ -0,0 +1,58 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include <armnn/Tensor.hpp> + +#include <functional> + +namespace armnn +{ + +struct BroadcastLoop +{ + BroadcastLoop(const TensorShape& inShape0, const TensorShape& inShape1, const TensorShape& outShape); + + unsigned int GetNumDimensions() + { + return static_cast<unsigned int>(m_DimData.size()); + } + + template <typename T0, typename T1, typename U, typename Func> + void Unroll(Func operationFunc, + unsigned int dimension, + const T0* inData0, + const T1* inData1, + U* outData) + { + if (dimension >= GetNumDimensions()) + { + *outData = operationFunc(*inData0, *inData1); + return; + } + + for (unsigned int i = 0; i < m_DimData[dimension].m_DimSize; i++) + { + Unroll(operationFunc, dimension + 1, inData0, inData1, outData); + + inData0 += m_DimData[dimension].m_Stride1; + inData1 += m_DimData[dimension].m_Stride2; + outData += m_DimData[dimension].m_StrideOut; + } + } + +private: + // Struct to hold the dimension data + struct BroadcastDimensionData + { + unsigned int m_DimSize; + unsigned int m_StrideOut; + unsigned int m_Stride1; + unsigned int m_Stride2; + }; + + std::vector<BroadcastDimensionData> m_DimData; +}; + +} //namespace armnn
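Editorial note, not part of the original patch: a minimal sketch of how BroadcastLoop is driven, mirroring the pattern Addition.cpp uses for mismatched shapes. The concrete shapes, the sample data, and TensorShape's initializer-list constructor are assumptions for illustration only (with <vector> included alongside Broadcast.hpp).

    // Broadcast-add a {1,3} tensor onto a {2,3} tensor (hypothetical data).
    armnn::TensorShape in0({1, 3});
    armnn::TensorShape in1({2, 3});
    armnn::TensorShape out({2, 3});
    std::vector<float> a = {1.f, 2.f, 3.f};
    std::vector<float> b = {10.f, 20.f, 30.f, 40.f, 50.f, 60.f};
    std::vector<float> r(out.GetNumElements());
    // Dimension 0 of in0 has size 1, so BroadcastLoop gives it a zero stride and the
    // row {1,2,3} is reused for both output rows.
    armnn::BroadcastLoop(in0, in1, out).Unroll(std::plus<float>(), 0, a.data(), b.data(), r.data());
    // r == {11, 22, 33, 41, 52, 63}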
\ No newline at end of file diff --git a/src/armnn/backends/RefWorkloads/ConvImpl.cpp b/src/armnn/backends/RefWorkloads/ConvImpl.cpp new file mode 100644 index 0000000000..9ebadacddb --- /dev/null +++ b/src/armnn/backends/RefWorkloads/ConvImpl.cpp @@ -0,0 +1,71 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ConvImpl.hpp" + +#include <boost/assert.hpp> + +#include <cmath> +#include <limits> + +namespace armnn +{ + +QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier) +{ + BOOST_ASSERT(multiplier >= 0.0f && multiplier < 1.0f); + if (multiplier == 0.0f) + { + m_Multiplier = 0; + m_RightShift = 0; + } + else + { + const double q = std::frexp(multiplier, &m_RightShift); + m_RightShift = -m_RightShift; + int64_t qFixed = static_cast<int64_t>(std::round(q * (1ll << 31))); + BOOST_ASSERT(qFixed <= (1ll << 31)); + if (qFixed == (1ll << 31)) + { + qFixed /= 2; + --m_RightShift; + } + BOOST_ASSERT(m_RightShift >= 0); + BOOST_ASSERT(qFixed <= std::numeric_limits<int32_t>::max()); + m_Multiplier = static_cast<int32_t>(qFixed); + } +} + +int32_t QuantizedMultiplierSmallerThanOne::operator*(int32_t rhs) const +{ + int32_t x = SaturatingRoundingDoublingHighMul(rhs, m_Multiplier); + return RoundingDivideByPOT(x, m_RightShift); +} + +int32_t QuantizedMultiplierSmallerThanOne::SaturatingRoundingDoublingHighMul(int32_t a, int32_t b) +{ + // Check for overflow + if (a == b && a == std::numeric_limits<int32_t>::min()) + { + return std::numeric_limits<int32_t>::max(); + } + int64_t a_64(a); + int64_t b_64(b); + int64_t ab_64 = a_64 * b_64; + int32_t nudge = ab_64 >= 0 ? (1 << 30) : (1 - (1 << 30)); + int32_t ab_x2_high32 = static_cast<std::int32_t>((ab_64 + nudge) / (1ll << 31)); + return ab_x2_high32; +} + +int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent) +{ + BOOST_ASSERT(exponent >= 0 && exponent <= 31); + int32_t mask = (1 << exponent) - 1; + int32_t remainder = x & mask; + int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0); + return (x >> exponent) + (remainder > threshold ? 1 : 0); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/ConvImpl.hpp b/src/armnn/backends/RefWorkloads/ConvImpl.hpp new file mode 100644 index 0000000000..ecc5b14687 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/ConvImpl.hpp @@ -0,0 +1,184 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "RefWorkloadUtils.hpp" + +#include <armnn/Tensor.hpp> + +#include <boost/assert.hpp> +#include <boost/numeric/conversion/cast.hpp> + +#include <cmath> +#include <limits> + +namespace armnn +{ + +/// Performs multiplication of a integer with a multiplier which is less than one, +/// using quantized integer arithmetic which is consistent with AndroidNN's CPU executor. +struct QuantizedMultiplierSmallerThanOne +{ +public: + /// Constructs a QuantizedMultiplierSmallerThanOne which will multiply by the given multiplier. + /// This stores the appropriate integer quantities (derived from the given multiplier) for later use. + /// The implementation of this function is adapted from Android NN's QuantizeMultiplierSmallerThanOne(). 
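    /// Editorial note, not part of the original patch: the constructor below decomposes the
    /// float multiplier so that multiplier ~= m_Multiplier * 2^-31 * 2^-m_RightShift, and
    /// operator* reapplies it with 32/64-bit integer arithmetic. A worked example under that
    /// reading (values traced through the .cpp implementation above):
    ///   QuantizedMultiplierSmallerThanOne q(0.25f);  // frexp(0.25) = 0.5 * 2^-1
    ///                                                // => m_RightShift = 1,
    ///                                                //    m_Multiplier = round(0.5 * 2^31) = 1073741824
    ///   int32_t y = q * 100;                         // SaturatingRoundingDoublingHighMul(100, 1073741824) = 50
    ///                                                // RoundingDivideByPOT(50, 1) = 25, i.e. 100 * 0.25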
+ QuantizedMultiplierSmallerThanOne(float multiplier); + + /// The implementation of this function is adapted from Android NN's MultiplyByQuantizedMultiplierSmallerThanOne() + int32_t operator*(int32_t rhs) const; + +private: + /// The implementation of this function is adapted from gemmlowp's SaturatingRoundingDoublingHighMul() + static int32_t SaturatingRoundingDoublingHighMul(int32_t a, int32_t b); + + /// The implementation of this function is adapted from gemmlowp's RoundingDivideByPOT() + static int32_t RoundingDivideByPOT(int32_t x, int exponent); + + int32_t m_Multiplier; + int32_t m_RightShift; +}; + +/// an implementation shared by normal and depthwise convolution +template<typename ConvData, typename InputType, typename BiasType, typename AccumulatorType> +static void ConvImpl(ConvData data, + const InputType* inputData, + float inputScale, + int32_t inputOffset, + const InputType* filterData, + float filterScale, + int32_t filterOffset, + const BiasType* biasData, + InputType* outputData, + float outputScale, + int32_t outputOffset, + bool depthwise = false) +{ + const TensorInfo& inputInfo0 = GetTensorInfo(data.m_Inputs[0]); + const TensorInfo& outputInfo0 = GetTensorInfo(data.m_Outputs[0]); + const TensorInfo& filterInfo = data.m_Weight->GetTensorInfo(); + + unsigned int depthMult = depthwise ? filterInfo.GetShape()[0] : 1; + unsigned int channelsInput = filterInfo.GetShape()[1]; + unsigned int channelsOutput = depthwise ? channelsInput * depthMult : filterInfo.GetShape()[0]; + + BOOST_ASSERT(data.m_Parameters.m_BiasEnabled == false || biasData != nullptr); + + unsigned int batchSize = outputInfo0.GetShape()[0]; + unsigned int heightOutput = outputInfo0.GetShape()[2]; + unsigned int widthOutput = outputInfo0.GetShape()[3]; + unsigned int heightInput = inputInfo0.GetShape()[2]; + unsigned int widthInput = inputInfo0.GetShape()[3]; + + unsigned int heightFilter = filterInfo.GetShape()[2]; + unsigned int widthFilter = filterInfo.GetShape()[3]; + + unsigned int paddingTop = data.m_Parameters.m_PadTop; + unsigned int paddingLeft = data.m_Parameters.m_PadLeft; + unsigned int hStride = data.m_Parameters.m_StrideY; + unsigned int xStride = data.m_Parameters.m_StrideX; + + // the world's least efficient convolution + for (unsigned int batchIdx = 0; batchIdx < batchSize; batchIdx++) + { + for (unsigned int cOutput = 0; cOutput < channelsOutput; cOutput++) + { + for (unsigned int yOutput = 0; yOutput < heightOutput; yOutput++) + { + for (unsigned int xOutput = 0; xOutput < widthOutput; xOutput++) + { + // this loop goes over each output element + AccumulatorType sum = AccumulatorType(); + + // for depthwise, each output channel corresponds to exactly one input channel + // for normal, must loop over each input channel + for (unsigned int cInput = 0; cInput < (depthwise ? 
1 : channelsInput); cInput++) + { + unsigned int depthwiseMultiplierIdx = 0; + if (depthwise) + { + cInput = cOutput / depthMult; + depthwiseMultiplierIdx = cOutput % depthMult; + } + + for (unsigned int yFilter = 0; yFilter < heightFilter; yFilter++) + { + for (unsigned int xFilter = 0; xFilter < widthFilter; xFilter++) + { + // this loop goes over each input element for each output element + + unsigned int filterIndex; + + // since dimensionality of kernel depends on depthwiseness, so does index + if (depthwise) + { + filterIndex = depthwiseMultiplierIdx * widthFilter * heightFilter * channelsInput + + cInput * widthFilter * heightFilter + + yFilter * widthFilter + + xFilter; + } + else + { + filterIndex = cOutput * widthFilter * heightFilter * channelsInput + + cInput * widthFilter * heightFilter + + yFilter * widthFilter + + xFilter; + } + AccumulatorType filterValue = filterData[filterIndex] - + boost::numeric_cast<AccumulatorType>(filterOffset); + + unsigned int yInput = yOutput * hStride + yFilter; + unsigned int xInput = xOutput * xStride + xFilter; + + AccumulatorType inputValue; + + // check if we're in the padding + if (yInput < paddingTop || yInput >= heightInput + paddingTop || + xInput < paddingLeft || xInput >= widthInput + paddingLeft ) + { + inputValue = AccumulatorType(); + } + else + { + inputValue = inputData[batchIdx * widthInput * heightInput * channelsInput + + widthInput * heightInput * cInput + + widthInput * (yInput - paddingTop) + + xInput - paddingLeft] - + boost::numeric_cast<AccumulatorType>(inputOffset); + } + sum += filterValue * inputValue; + } + } + } + + if (data.m_Parameters.m_BiasEnabled) + { + sum += biasData[cOutput]; + } + + if (outputScale != 0.0f) + { + float multiplier = (inputScale * filterScale) / outputScale; + // Apply the multiplier to sum, but do so using some quantized arithmetic which is consistent + // with the AndroidNN CPU implementation. This should be (roughly) equivalent to: + // sum = std::round(multiplier * sum + outputOffset); + sum = boost::numeric_cast<AccumulatorType>( + QuantizedMultiplierSmallerThanOne(multiplier) * boost::numeric_cast<int32_t>(sum)) + + boost::numeric_cast<AccumulatorType>(outputOffset); + sum = std::min<AccumulatorType>(std::max<AccumulatorType>(sum, 0), 255); + } + + outputData[batchIdx * widthOutput * heightOutput * channelsOutput + + widthOutput * heightOutput * cOutput + + widthOutput * yOutput + + xOutput] = boost::numeric_cast<InputType>(sum); + } + } + } + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/FullyConnected.cpp b/src/armnn/backends/RefWorkloads/FullyConnected.cpp new file mode 100644 index 0000000000..8ba11d19c6 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/FullyConnected.cpp @@ -0,0 +1,62 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
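// Standalone numeric check of the fixed-point scheme used by QuantizedMultiplierSmallerThanOne
// above (a sketch with made-up values, not part of the patch). The constructor stores a
// multiplier m in [0,1) as  m ~= qFixed * 2^-31 * 2^-rightShift  with qFixed in [2^30, 2^31);
// operator*(rhs) then computes round(m * rhs) as a doubling high multiply followed by a
// rounding right shift.
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    const float multiplier = 0.1234f;
    int exponent = 0;
    const double q = std::frexp(multiplier, &exponent);     // multiplier = q * 2^exponent, q in [0.5,1)
    const int rightShift = -exponent;                        // here: 3
    const int64_t qFixed = static_cast<int64_t>(std::round(q * (1ll << 31)));

    const int32_t rhs = 20000;
    // Doubling high multiply: top 32 bits of 2 * qFixed * rhs, with round-to-nearest.
    const int64_t ab = qFixed * static_cast<int64_t>(rhs);
    const int32_t high = static_cast<int32_t>((ab + (1ll << 30)) >> 31);
    // Rounding divide by 2^rightShift (non-negative case).
    const int32_t mask = (1 << rightShift) - 1;
    const int32_t result = (high >> rightShift) + (((high & mask) > (mask >> 1)) ? 1 : 0);

    std::printf("integer result %d vs float reference %ld\n",
                result, std::lround(multiplier * rhs));      // both print 2468
    return 0;
}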
+// + +#include "FullyConnected.hpp" + +#include <boost/assert.hpp> + +namespace armnn +{ + +void FullyConnected(const float* inputData, + float* outputData, + const TensorInfo& inputTensorInfo, + const TensorInfo& outputTensorInfo, + const float* weightData, + const float* biasData, + bool transposeWeights) +{ + unsigned int N = outputTensorInfo.GetShape()[1]; // Output Vector Size + + BOOST_ASSERT(inputTensorInfo.GetNumDimensions() > 1); // Need some data + + unsigned int K = 1; // Total number of activations in the input + for (unsigned int i = 1; i < inputTensorInfo.GetNumDimensions(); i++) + { + K *= inputTensorInfo.GetShape()[i]; + } + + for (unsigned int n = 0; n < inputTensorInfo.GetShape()[0]; n++) + { + for (unsigned int channelOutput = 0; channelOutput < N; channelOutput++) + { + float outval = 0.f; + + for (unsigned int channelInput = 0; channelInput < K; channelInput++) + { + float weight; + if (transposeWeights) + { + weight = weightData[channelOutput * K + channelInput]; + } + else + { + weight = weightData[channelInput * N + channelOutput]; + } + + outval += weight * inputData[n * K + channelInput]; + } + + if (biasData) + { + outval += biasData[channelOutput]; + } + + outputData[n * N + channelOutput] = outval; + } + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/FullyConnected.hpp b/src/armnn/backends/RefWorkloads/FullyConnected.hpp new file mode 100644 index 0000000000..9fa2456110 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/FullyConnected.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +/// Performs a matrix multiplication and optionally adds a bias +void FullyConnected(const float* inputData, + float* outputData, + const TensorInfo& inputTensorInfo, + const TensorInfo& outputTensorInfo, + const float* weightData, + const float* biasData, + bool transposeWeights); + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Merger.hpp b/src/armnn/backends/RefWorkloads/Merger.hpp new file mode 100644 index 0000000000..9695e457e2 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Merger.hpp @@ -0,0 +1,81 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
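// Minimal standalone check of the FullyConnected reference semantics above (a sketch with
// made-up values): one batch, K = 3 flattened inputs, N = 2 outputs, weights stored as
// [N, K], i.e. the transposeWeights == true indexing path in the code above.
#include <cstdio>

int main()
{
    const float x[3] = {1.f, 2.f, 3.f};
    const float w[2 * 3] = {0.5f, 0.5f, 0.5f,    // row for output channel 0
                            1.f,  0.f, -1.f};    // row for output channel 1
    const float b[2] = {0.f, 10.f};
    float y[2];
    for (int n = 0; n < 2; ++n)
    {
        y[n] = b[n];
        for (int k = 0; k < 3; ++k)
        {
            y[n] += w[n * 3 + k] * x[k];         // weight = weightData[channelOutput * K + channelInput]
        }
    }
    std::printf("%.1f %.1f\n", y[0], y[1]);      // 3.0 8.0
    return 0;
}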
+// + +#pragma once + +#include "RefWorkloadUtils.hpp" + +#include "backends/WorkloadData.hpp" + +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +template <typename DataType> +void Merger(const MergerQueueDescriptor& data) +{ + const TensorInfo& outputInfo0 = GetTensorInfo(data.m_Outputs[0]); + + for (unsigned int index = 0 ; index < outputInfo0.GetNumElements(); ++index) + { + unsigned int indices[MaxNumOfTensorDimensions]; + + unsigned int indexRemainder = index; + unsigned int dimensionStride = outputInfo0.GetNumElements(); + + for (unsigned int i=0; i<outputInfo0.GetNumDimensions(); i++) + { + dimensionStride /= outputInfo0.GetShape()[i]; + indices[i] = indexRemainder / dimensionStride; // use integer division to round down + indexRemainder -= indices[i] * dimensionStride; + } + + for (unsigned int viewIdx = 0; viewIdx < data.m_ViewOrigins.size(); ++viewIdx) + { + MergerQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx]; + + //split view extents are defined by the size of (the corresponding) input tensor + const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]); + + // check all dimensions to see if this element is inside the given input view + bool insideView = true; + for (unsigned int i=0; i<inputInfo.GetNumDimensions(); i++) + { + if (indices[i] < view.m_Origin[i]) + { + insideView = false; + } + if (indices[i] >= view.m_Origin[i] + inputInfo.GetShape()[i]) + { + insideView = false; + } + } + + if (insideView) + { + unsigned int inIndex = 0; + unsigned int dimensionStride = 1; + + for (unsigned int i = inputInfo.GetNumDimensions(); i-- > 0;) + { + inIndex += dimensionStride * (indices[i] - view.m_Origin[i]); + dimensionStride *= inputInfo.GetShape()[i]; + } + + //we are within the view, copy input data to the output corresponding to this view + (GetOutputTensorData<DataType>(0, data))[index] = + (GetInputTensorData<DataType>(viewIdx, data))[inIndex]; + + //what should we do if input views overlap on the output tensor? + //we could error, take the average, or shm else... + //for now just stop after finding first view (input) that matches. + break; + } + } + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Multiplication.cpp b/src/armnn/backends/RefWorkloads/Multiplication.cpp new file mode 100644 index 0000000000..7f558d83c5 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Multiplication.cpp @@ -0,0 +1,22 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "Multiplication.hpp" + +namespace armnn +{ + +void Multiplication(const float* in0, + const float* in1, + unsigned int numElements, + float* out) +{ + for (unsigned int i = 0; i < numElements; ++i) + { + out[i] = in0[i] * in1[i]; + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Multiplication.hpp b/src/armnn/backends/RefWorkloads/Multiplication.hpp new file mode 100644 index 0000000000..d0b033e7ec --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Multiplication.hpp @@ -0,0 +1,16 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
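// A self-contained sketch of the index arithmetic used by Merger above: concatenate
// in0 (shape [1,2], origin {0,0}) and in1 (shape [1,2], origin {0,2}) into out
// (shape [1,4]). The view origins are hard-coded here; in the real workload they
// come from MergerQueueDescriptor::m_ViewOrigins.
#include <cstdio>

int main()
{
    const float in0[2] = {1.f, 2.f};
    const float in1[2] = {3.f, 4.f};
    const unsigned outShape[2] = {1, 4};
    const unsigned inShape[2]  = {1, 2};
    const unsigned origins[2][2] = {{0, 0}, {0, 2}};
    const float* inputs[2] = {in0, in1};
    float out[4];

    for (unsigned index = 0; index < 4; ++index)
    {
        // Decode the flat output index into per-dimension indices.
        unsigned indices[2], rem = index, stride = 4;
        for (unsigned i = 0; i < 2; ++i)
        {
            stride /= outShape[i];
            indices[i] = rem / stride;
            rem -= indices[i] * stride;
        }
        // Find the view containing this element and re-encode against its origin.
        for (unsigned view = 0; view < 2; ++view)
        {
            bool inside = true;
            for (unsigned i = 0; i < 2; ++i)
            {
                inside = inside && indices[i] >= origins[view][i]
                                && indices[i] < origins[view][i] + inShape[i];
            }
            if (inside)
            {
                unsigned inIndex = (indices[0] - origins[view][0]) * inShape[1]
                                 + (indices[1] - origins[view][1]);
                out[index] = inputs[view][inIndex];
                break;
            }
        }
    }
    std::printf("%.0f %.0f %.0f %.0f\n", out[0], out[1], out[2], out[3]);   // 1 2 3 4
    return 0;
}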
+// + +#pragma once + +namespace armnn +{ + +void Multiplication(const float* in0, + const float* in1, + unsigned int numElements, + float* out); + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Pooling2d.cpp b/src/armnn/backends/RefWorkloads/Pooling2d.cpp new file mode 100644 index 0000000000..6d15d8a436 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Pooling2d.cpp @@ -0,0 +1,241 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "Pooling2d.hpp" + +#include <armnn/Exceptions.hpp> +#include <armnn/Types.hpp> + +#include <boost/numeric/conversion/cast.hpp> + +#include <limits> +#include <algorithm> +#include <functional> + +namespace +{ + using PoolingAlgorithm = armnn::PoolingAlgorithm; + + float DefaultInitializer(PoolingAlgorithm algorithm) + { + switch (algorithm) + { + case PoolingAlgorithm::Max: + { + return std::numeric_limits<float>::lowest(); + } + case PoolingAlgorithm::Average: + case PoolingAlgorithm::L2: + { + return 0.0f; + } + default: + { + throw armnn::InvalidArgumentException("Unsupported pooling algorithm"); + } + } + } + + using Accumulator = std::function<void(float & accu, float value)>; + + Accumulator GetAccumulator(PoolingAlgorithm algorithm) + { + switch (algorithm) + { + case PoolingAlgorithm::Max: + { + return [](float & accu, float value) { + if (value > accu) { + accu = value; + } + }; + } + + case PoolingAlgorithm::Average: + { + return [](float & accu, float value) { + accu += value; + }; + } + + case PoolingAlgorithm::L2: + { + return [](float & accu, float value) { + accu += (value*value); + }; + } + + default: + { + throw armnn::InvalidArgumentException("Unsupported pooling algorithm"); + } + } + } + + using Executor = std::function<void(float & accumulated, float kernelSize)>; + + Executor GetExecutor(PoolingAlgorithm algorithm) + { + switch (algorithm) + { + case PoolingAlgorithm::Max: + { + return [](float & accumulated, float kernelSize) {}; + } + + case PoolingAlgorithm::Average: + { + return [](float & accumulated, float kernelSize) { + accumulated /= kernelSize; + }; + } + + case PoolingAlgorithm::L2: + { + return [](float & accumulated, float kernelSize) { + accumulated = sqrtf(accumulated / kernelSize); + }; + } + + default: + { + throw armnn::InvalidArgumentException("Unsupported pooling algorithm"); + } + } + } + + bool OnPaddingOnly(int start, int end, int maxRange, int padding) + { + if (end <= 0 || start > (maxRange - padding)) + { + return true; + } + else + { + return false; + } + } + + + bool ClampRange(int & start, int & end, int maxRange) + { + if (start < 0 || end > maxRange) + { + start = std::min(std::max(start, 0), maxRange); + end = std::min(std::max(end, 0), maxRange); + return true; + } + else + { + return false; + } + } +} + +namespace armnn +{ + +void Pooling2d(const float* in, + float* out, + const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + const Pooling2dDescriptor& params) +{ + const int batchSize = boost::numeric_cast<int>(outputInfo.GetShape()[0]); + const int channels = boost::numeric_cast<int>(outputInfo.GetShape()[1]); + const int heightOutput = boost::numeric_cast<int>(outputInfo.GetShape()[2]); + const int widthOutput = boost::numeric_cast<int>(outputInfo.GetShape()[3]); + const int heightInput = boost::numeric_cast<int>(inputInfo.GetShape()[2]); + const int widthInput = boost::numeric_cast<int>(inputInfo.GetShape()[3]); + const int padLeft = boost::numeric_cast<int>(params.m_PadLeft); + 
const int padRight = boost::numeric_cast<int>(params.m_PadRight); + const int padTop = boost::numeric_cast<int>(params.m_PadTop); + const int padBottom = boost::numeric_cast<int>(params.m_PadBottom); + const int strideX = boost::numeric_cast<int>(params.m_StrideX); + const int strideY = boost::numeric_cast<int>(params.m_StrideY); + const int poolHeight = boost::numeric_cast<int>(params.m_PoolHeight); + const int poolWidth = boost::numeric_cast<int>(params.m_PoolWidth); + + float defaultInitializer = DefaultInitializer(params.m_PoolType); + + Accumulator accumulate = GetAccumulator(params.m_PoolType); + Executor execute = GetExecutor(params.m_PoolType); + + // Check supported padding methods outside the loop to simplify + // the inner loop + if (params.m_PaddingMethod != PaddingMethod::Exclude && + params.m_PaddingMethod != PaddingMethod::IgnoreValue) + { + throw armnn::InvalidArgumentException("Unsupported padding type"); + } + + for (int n = 0; n < batchSize; n++) + { + for (int c = 0; c < channels; c++) + { + for (int yOutput = 0; yOutput < heightOutput; yOutput++) + { + for (int xOutput = 0; xOutput < widthOutput; xOutput++) + { + int hstart = (yOutput * strideY) - padTop; + int wstart = (xOutput * strideX) - padLeft; + int hend = hstart + poolHeight; + int wend = wstart + poolWidth; + + // Clamp the pooling region inside the valid input area (which includes the padding). + // This is necessary because the final pooling in a row may overlap beyond the padding. + hend = std::min(hend, heightInput + padRight); + wend = std::min(wend, widthInput + padBottom); + + float result = defaultInitializer; + float poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart)); + + // special case: when the pooling kernel is over a padding region and the padding + // size is larger or equal to the kernel and the kernel only covers + // padding and no real values, then we initialize the result as zero + // by convention. This is because we need to choose a value here and + // all values we have are padding, which we ignore. + if (OnPaddingOnly(hstart, hend, heightInput, padBottom) || + OnPaddingOnly(wstart, wend, widthInput, padRight)) + { + result = 0.0f; + } + + bool clamped = ClampRange(wstart, wend, widthInput); + clamped |= ClampRange(hstart, hend, heightInput); + + if (clamped && params.m_PaddingMethod == PaddingMethod::Exclude) + { + // when we exclude the padding, it means we calculate with a smaller + // kernel size, so I change the divisor here + poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart)); + } + + for (auto yInput = hstart; yInput < hend; yInput++) + { + for (auto xInput = wstart; xInput < wend; xInput++) + { + float inval = in[n * widthInput * heightInput * channels + + c * widthInput * heightInput + + yInput * widthInput + + xInput]; + + accumulate(result, inval); + } + } + + execute(result, poolAreaSize); + + out[n * widthOutput * heightOutput * channels + + c * widthOutput * heightOutput + + yOutput * widthOutput + + xOutput] = result; + } + } + } + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Pooling2d.hpp b/src/armnn/backends/RefWorkloads/Pooling2d.hpp new file mode 100644 index 0000000000..f88b1a0a4e --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Pooling2d.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
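// A worked example of the two divisor behaviours handled above (a sketch, values made up):
// 2x2 average pooling, stride 1, one cell of top/left padding, and a value of 8.0 in the
// only real input cell covered by the window. With PaddingMethod::IgnoreValue the window
// area stays 4 and the padded cells contribute zeros; with PaddingMethod::Exclude the
// window is clamped to the valid input and the divisor shrinks to 1.
#include <cstdio>

int main()
{
    const float coveredInputValue = 8.0f;

    // IgnoreValue: accumulate zeros for the padded cells, divide by the full kernel area.
    const float ignoreValueResult = (0.f + 0.f + 0.f + coveredInputValue) / 4.0f;          // 2.0

    // Exclude: clamp [hstart,hend) and [wstart,wend) to the input, recompute the area.
    int hstart = -1, hend = 1, wstart = -1, wend = 1;                                      // kernel anchored at (-1,-1)
    hstart = hstart < 0 ? 0 : hstart;
    wstart = wstart < 0 ? 0 : wstart;
    const float excludeResult = coveredInputValue / float((hend - hstart) * (wend - wstart));   // 8.0

    std::printf("IgnoreValue: %.1f  Exclude: %.1f\n", ignoreValueResult, excludeResult);
    return 0;
}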
+// + +#pragma once + +#include <armnn/Descriptors.hpp> +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +/// Computes the Pooling2d operation +void Pooling2d(const float* in, + float* out, + const TensorInfo& inputInfo, + const TensorInfo& outputInfo, + const Pooling2dDescriptor& params); + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.cpp new file mode 100644 index 0000000000..f566759deb --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.cpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefActivationFloat32Workload.hpp" + +#include "Activation.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefActivationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefActivationFloat32Workload_Execute"); + + Activation(GetInputTensorDataFloat(0, m_Data), + GetOutputTensorDataFloat(0, m_Data), + GetTensorInfo(m_Data.m_Inputs[0]), + m_Data.m_Parameters.m_Function, + m_Data.m_Parameters.m_A, + m_Data.m_Parameters.m_B); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.hpp new file mode 100644 index 0000000000..d8bd216699 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefActivationFloat32Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" + +namespace armnn +{ + +class RefActivationFloat32Workload : public Float32Workload<ActivationQueueDescriptor> +{ +public: + using Float32Workload<ActivationQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefActivationUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefActivationUint8Workload.cpp new file mode 100644 index 0000000000..e8852f4bf8 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefActivationUint8Workload.cpp @@ -0,0 +1,38 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "RefActivationUint8Workload.hpp" + +#include "Activation.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefActivationUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefActivationUint8Workload_Execute"); + + const TensorInfo& tensorInfo = GetTensorInfo(m_Data.m_Inputs[0]); + + auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), tensorInfo); + + std::vector<float> results(tensorInfo.GetNumElements()); + + Activation(dequant.data(), + results.data(), + tensorInfo, + m_Data.m_Parameters.m_Function, + m_Data.m_Parameters.m_A, + m_Data.m_Parameters.m_B); + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), GetTensorInfo(m_Data.m_Outputs[0])); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefActivationUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefActivationUint8Workload.hpp new file mode 100644 index 0000000000..51514d0646 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefActivationUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefActivationUint8Workload : public Uint8Workload<ActivationQueueDescriptor> +{ +public: + using Uint8Workload<ActivationQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp new file mode 100644 index 0000000000..e06d7f9295 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.cpp @@ -0,0 +1,31 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefAdditionFloat32Workload.hpp" + +#include "Addition.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefAdditionFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefAdditionFloat32Workload_Execute"); + + const TensorShape& inShape0 = GetTensorInfo(m_Data.m_Inputs[0]).GetShape(); + const TensorShape& inShape1 = GetTensorInfo(m_Data.m_Inputs[1]).GetShape(); + const TensorShape& outShape = GetTensorInfo(m_Data.m_Outputs[0]).GetShape(); + + const float* inData0 = GetInputTensorDataFloat(0, m_Data); + const float* inData1 = GetInputTensorDataFloat(1, m_Data); + float* outData = GetOutputTensorDataFloat(0, m_Data); + + Addition(inShape0, inShape1, outShape, inData0, inData1, outData); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp new file mode 100644 index 0000000000..e69ea28b28 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefAdditionFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
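// Sketch of the dequantize -> float compute -> requantize pattern shared by the Uint8
// workloads above. The helpers below are local stand-ins assuming the usual asymmetric
// scheme  real = scale * (quantised - offset); the real Dequantize/Quantize helpers live
// in RefWorkloadUtils.hpp and are not shown in this part of the patch.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

static float DequantizeOne(uint8_t q, float scale, int32_t offset)
{
    return scale * (static_cast<int32_t>(q) - offset);
}

static uint8_t QuantizeOne(float value, float scale, int32_t offset)
{
    const int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

int main()
{
    // Two inputs quantised with scale 0.5, offset 10; output quantised with scale 1.0, offset 0.
    const uint8_t a = 14, b = 22;                            // represent 2.0 and 6.0
    const float sum = DequantizeOne(a, 0.5f, 10) + DequantizeOne(b, 0.5f, 10);
    const uint8_t out = QuantizeOne(sum, 1.0f, 0);
    std::printf("%.1f -> %u\n", sum, out);                   // 8.0 -> 8
    return 0;
}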
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefAdditionFloat32Workload : public Float32Workload<AdditionQueueDescriptor> +{ +public: + using Float32Workload<AdditionQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp new file mode 100644 index 0000000000..fa2dfeefc0 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.cpp @@ -0,0 +1,41 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefAdditionUint8Workload.hpp" + +#include "Addition.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefAdditionUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefAdditionUint8Workload_Execute"); + + const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0); + auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1); + + std::vector<float> results(outputInfo.GetNumElements()); + + Addition(inputInfo0.GetShape(), + inputInfo1.GetShape(), + outputInfo.GetShape(), + dequant0.data(), + dequant1.data(), + results.data()); + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp new file mode 100644 index 0000000000..0f5a23ef4d --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefAdditionUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefAdditionUint8Workload : public Uint8Workload<AdditionQueueDescriptor> +{ +public: + using Uint8Workload<AdditionQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.cpp b/src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.cpp new file mode 100644 index 0000000000..50a198f011 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.cpp @@ -0,0 +1,49 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefBaseConstantWorkload.hpp" + +#include "RefWorkloadUtils.hpp" + +#include <armnn/Types.hpp> + +#include <boost/assert.hpp> + +#include <cstring> + +namespace armnn +{ + +template <armnn::DataType DataType> +void RefBaseConstantWorkload<DataType>::Execute() const +{ + // Considering the reference backend independently, it could be possible to initialise the intermediate tensor + // created by the layer output handler at workload construction time, rather than at workload execution time. + // However, this is not an option for other backends (e.g. CL). For consistency, we prefer to align all + // implementations. 
+ // A similar argument can be made about performing the memory copy in the first place (the layer output handler + // could have a non-owning reference to the layer output tensor managed by the const input layer); again, this is + // not an option for other backends, and the extra complexity required to make this work for the reference backend + // may not be worth the effort (skipping a memory copy in the first inference). + if (!m_RanOnce) + { + const ConstantQueueDescriptor& data = this->m_Data; + + BOOST_ASSERT(data.m_LayerOutput != nullptr); + + const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]); + BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes()); + + memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(), + outputInfo.GetNumBytes()); + + m_RanOnce = true; + } +} + +template class RefBaseConstantWorkload<DataType::Float32>; +template class RefBaseConstantWorkload<DataType::QuantisedAsymm8>; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.hpp b/src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.hpp new file mode 100644 index 0000000000..0ede46d9fb --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefBaseConstantWorkload.hpp @@ -0,0 +1,33 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +#include <armnn/Types.hpp> + +namespace armnn +{ + +// Base class template providing an implementation of the Constant layer common to all data types +template <armnn::DataType DataType> +class RefBaseConstantWorkload : public TypedWorkload<ConstantQueueDescriptor, DataType> +{ +public: + RefBaseConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info) + : TypedWorkload<ConstantQueueDescriptor, DataType>(descriptor, info) + , m_RanOnce(false) + { + } + + virtual void Execute() const override; + +private: + mutable bool m_RanOnce; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.cpp new file mode 100644 index 0000000000..c421b0f212 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.cpp @@ -0,0 +1,31 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
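// The pattern above, a const Execute() that lazily copies the layer's constant tensor
// into the output exactly once, boils down to the sketch below (names are illustrative
// only, not armnn API):
#include <cstdio>
#include <cstring>

class ConstantSource
{
public:
    void Run(float* output, size_t count) const
    {
        if (!m_RanOnce)                        // only the first inference pays for the copy
        {
            std::memcpy(output, m_Values, count * sizeof(float));
            m_RanOnce = true;                  // legal in a const method because the flag is mutable
        }
    }

private:
    float m_Values[3] = {1.f, 2.f, 3.f};
    mutable bool m_RanOnce = false;
};

int main()
{
    ConstantSource source;
    float out[3] = {};
    source.Run(out, 3);
    source.Run(out, 3);                        // second call is a no-op
    std::printf("%.0f %.0f %.0f\n", out[0], out[1], out[2]);   // 1 2 3
    return 0;
}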
+// + +#include "RefBatchNormalizationFloat32Workload.hpp" + +#include "BatchNormImpl.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefBatchNormalizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchNormalizationFloat32Workload_Execute"); + + const float* var = m_Data.m_Variance->GetConstTensor<float>(); + const float* mean = m_Data.m_Mean->GetConstTensor<float>(); + const float* gamma = m_Data.m_Gamma->GetConstTensor<float>(); + const float* beta = m_Data.m_Beta->GetConstTensor<float>(); + + auto inputData = GetInputTensorDataFloat(0, m_Data); + auto outputData = GetOutputTensorDataFloat(0, m_Data); + + BatchNormImpl(m_Data, var, mean, gamma, beta, outputData, inputData); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.hpp new file mode 100644 index 0000000000..cbcdadd749 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefBatchNormalizationFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefBatchNormalizationFloat32Workload : public Float32Workload<BatchNormalizationQueueDescriptor> +{ +public: + using Float32Workload<BatchNormalizationQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.cpp new file mode 100644 index 0000000000..8a48523765 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.cpp @@ -0,0 +1,40 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
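// BatchNormImpl.hpp is not included in this part of the patch; the sketch below assumes
// it applies the standard per-channel inference-time transform
//     out = gamma * (x - mean) / sqrt(var + eps) + beta
// which is consistent with the variance, mean, gamma and beta tensors gathered above.
#include <cmath>
#include <cstdio>

int main()
{
    const float x = 3.0f, mean = 1.0f, var = 4.0f, gamma = 2.0f, beta = 0.5f, eps = 1e-5f;
    const float out = gamma * (x - mean) / std::sqrt(var + eps) + beta;
    std::printf("%.3f\n", out);   // ~2.500, i.e. 2 * 2 / 2 + 0.5
    return 0;
}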
+// + +#include "RefBatchNormalizationUint8Workload.hpp" + +#include "BatchNormImpl.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefBatchNormalizationUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchNormalizationUint8Workload_Execute"); + + const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& varInfo = GetTensorInfo(m_Data.m_Variance); + const TensorInfo& meanInfo = GetTensorInfo(m_Data.m_Mean); + const TensorInfo& gammaInfo = GetTensorInfo(m_Data.m_Gamma); + const TensorInfo& betaInfo = GetTensorInfo(m_Data.m_Beta); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + auto input = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0); + auto var = Dequantize(m_Data.m_Variance->GetConstTensor<uint8_t>(), varInfo); + auto mean = Dequantize(m_Data.m_Mean->GetConstTensor<uint8_t>(), meanInfo); + auto gamma = Dequantize(m_Data.m_Gamma->GetConstTensor<uint8_t>(), gammaInfo); + auto beta = Dequantize(m_Data.m_Beta->GetConstTensor<uint8_t>(), betaInfo); + + std::vector<float> results(outputInfo.GetNumElements()); + BatchNormImpl(m_Data, var.data(), mean.data(), gamma.data(), beta.data(), results.data(), input.data()); + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.hpp new file mode 100644 index 0000000000..57fe995ba5 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefBatchNormalizationUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefBatchNormalizationUint8Workload : public Uint8Workload<BatchNormalizationQueueDescriptor> +{ +public: + using Uint8Workload<BatchNormalizationQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.cpp new file mode 100644 index 0000000000..0ed66013f6 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.cpp @@ -0,0 +1,19 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefConstantFloat32Workload.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefConstantFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConstantFloat32Workload_Execute"); + RefBaseConstantWorkload::Execute(); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.hpp new file mode 100644 index 0000000000..f0876a99bf --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefConstantFloat32Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "RefBaseConstantWorkload.hpp" + +namespace armnn +{ + +class RefConstantFloat32Workload : public RefBaseConstantWorkload<DataType::Float32> +{ +public: + using RefBaseConstantWorkload<DataType::Float32>::RefBaseConstantWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefConstantUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefConstantUint8Workload.cpp new file mode 100644 index 0000000000..2a4a514ad8 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefConstantUint8Workload.cpp @@ -0,0 +1,19 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefConstantUint8Workload.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefConstantUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConstantUint8Workload_Execute"); + RefBaseConstantWorkload::Execute(); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefConstantUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefConstantUint8Workload.hpp new file mode 100644 index 0000000000..504737dade --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefConstantUint8Workload.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "RefBaseConstantWorkload.hpp" + +namespace armnn +{ + +class RefConstantUint8Workload : public RefBaseConstantWorkload<DataType::QuantisedAsymm8> +{ +public: + using RefBaseConstantWorkload<DataType::QuantisedAsymm8>::RefBaseConstantWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.cpp new file mode 100644 index 0000000000..6e4cc69063 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.cpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefConvolution2dFloat32Workload.hpp" + +#include "ConvImpl.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefConvolution2dFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvolution2dFloat32Workload_Execute"); + + float* outputData = GetOutputTensorDataFloat(0, m_Data); + const float* inputData = GetInputTensorDataFloat(0, m_Data); + const float* weightData = m_Data.m_Weight->template GetConstTensor<float>(); + const float* biasData = m_Data.m_Parameters.m_BiasEnabled ? + m_Data.m_Bias->template GetConstTensor<float>() : nullptr; + + ConvImpl<armnn::Convolution2dQueueDescriptor, float, float, float>( + m_Data, inputData, 0.0f, 0, weightData, 0.0f, 0, biasData, outputData, 0.0f, 0); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.hpp new file mode 100644 index 0000000000..514369c262 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefConvolution2dFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
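// In the float convolution workload above, every quantisation argument passed to ConvImpl
// is zero, so the `if (outputScale != 0.0f)` requantisation/clamping branch in ConvImpl.hpp
// is never taken and the accumulator is written out unchanged. A standalone illustration
// of that selection (not armnn code; the real quantised path uses fixed-point arithmetic
// and clamps to [0,255]):
#include <cstdio>

static float Requantize(float sum, float inputScale, float filterScale, float outputScale)
{
    if (outputScale != 0.0f)                  // quantised (uint8) path
    {
        const float multiplier = (inputScale * filterScale) / outputScale;
        sum = sum * multiplier;
    }
    return sum;                               // float path: pass the accumulator through
}

int main()
{
    std::printf("float path: %.1f, uint8 path: %.1f\n",
                Requantize(100.f, 0.f, 0.f, 0.f),         // 100.0
                Requantize(100.f, 0.5f, 0.25f, 0.5f));    // 25.0
    return 0;
}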
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefConvolution2dFloat32Workload : public Float32Workload<Convolution2dQueueDescriptor> +{ +public: + using Float32Workload<Convolution2dQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefConvolution2dUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefConvolution2dUint8Workload.cpp new file mode 100644 index 0000000000..f390baa387 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefConvolution2dUint8Workload.cpp @@ -0,0 +1,38 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefConvolution2dUint8Workload.hpp" + +#include "ConvImpl.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefConvolution2dUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvolution2dUint8Workload_Execute"); + + const uint8_t* inputData = GetInputTensorDataU8(0, m_Data); + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const uint8_t* weightsData = m_Data.m_Weight->template GetConstTensor<uint8_t>(); + const TensorInfo& weightsInfo = GetTensorInfo(m_Data.m_Weight); + const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? + m_Data.m_Bias->template GetConstTensor<int32_t>() : + nullptr; + uint8_t* outputData = GetOutputTensorDataU8(0, m_Data); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + ConvImpl<armnn::Convolution2dQueueDescriptor, uint8_t, int32_t, int32_t>( + m_Data, + inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), + weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(), + biasData, + outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset()); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefConvolution2dUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefConvolution2dUint8Workload.hpp new file mode 100644 index 0000000000..954a206463 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefConvolution2dUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefConvolution2dUint8Workload : public Uint8Workload<Convolution2dQueueDescriptor> +{ +public: + using Uint8Workload<Convolution2dQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.cpp new file mode 100644 index 0000000000..c631fecb66 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.cpp @@ -0,0 +1,30 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "RefDepthwiseConvolution2dFloat32Workload.hpp" + +#include "ConvImpl.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefDepthwiseConvolution2dFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDepthwiseConvolution2dFloat32Workload_Execute"); + + float* outputData = GetOutputTensorDataFloat(0, m_Data); + const float* inputData = GetInputTensorDataFloat(0, m_Data); + const float* weightData = m_Data.m_Weight->template GetConstTensor<float>(); + const float* biasData = m_Data.m_Parameters.m_BiasEnabled ? + m_Data.m_Bias->template GetConstTensor<float>() : nullptr; + + ConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, float, float, float> + (m_Data, inputData, 0.0f, 0, weightData, 0.0f, 0, biasData, outputData, 0.0f, 0, true); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.hpp new file mode 100644 index 0000000000..34e6524684 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefDepthwiseConvolution2dFloat32Workload : public Float32Workload<DepthwiseConvolution2dQueueDescriptor> +{ +public: + using Float32Workload<DepthwiseConvolution2dQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.cpp new file mode 100644 index 0000000000..5a8fb13112 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.cpp @@ -0,0 +1,38 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefDepthwiseConvolution2dUint8Workload.hpp" + +#include "ConvImpl.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefDepthwiseConvolution2dUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDepthwiseConvolution2dUint8Workload_Execute"); + + const uint8_t* inputData = GetInputTensorDataU8(0, m_Data); + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const uint8_t* weightsData = m_Data.m_Weight->template GetConstTensor<uint8_t>(); + const TensorInfo& weightsInfo = GetTensorInfo(m_Data.m_Weight); + const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? 
+ m_Data.m_Bias->template GetConstTensor<int32_t>() : + nullptr; + uint8_t* outputData = GetOutputTensorDataU8(0, m_Data); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + ConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, uint8_t, int32_t, int32_t>( + m_Data, + inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), + weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(), + biasData, + outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), true); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.hpp new file mode 100644 index 0000000000..bd9945f529 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefDepthwiseConvolution2dUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefDepthwiseConvolution2dUint8Workload : public Uint8Workload<DepthwiseConvolution2dQueueDescriptor> +{ +public: + using Uint8Workload<DepthwiseConvolution2dQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp new file mode 100644 index 0000000000..483fa7e00e --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.cpp @@ -0,0 +1,42 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefFakeQuantizationFloat32Workload.hpp" + +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <boost/numeric/conversion/cast.hpp> + +namespace armnn +{ + +void FakeQuantization(const float* inputData, float* outputData, uint32_t numElements, float min, float max) +{ + float scale = (max - min) / 255.f; + int32_t offset = boost::numeric_cast<int32_t>((-min * 255.f) / (max - min)); + + for (uint32_t i = 0; i < numElements; i++) + { + outputData[i] = static_cast<float>(armnn::Quantize<uint8_t>(inputData[i], scale, offset)); + } + +} + +void RefFakeQuantizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFakeQuantizationFloat32Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + + const float* inputData = GetInputTensorDataFloat(0, m_Data); + float* outputData = GetOutputTensorDataFloat(0, m_Data); + FakeQuantization(inputData, outputData, inputInfo.GetNumElements(), + m_Data.m_Parameters.m_Min, + m_Data.m_Parameters.m_Max); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.hpp new file mode 100644 index 0000000000..7ad5272edb --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefFakeQuantizationFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
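// Worked example of the affine mapping set up by FakeQuantization above, with min = -1.0
// and max = 1.0 chosen purely for illustration:
//   scale  = (max - min) / 255           = 2/255  ~= 0.00784
//   offset = (-min * 255) / (max - min)  = 127.5, truncated to 127 by the integer cast
// Each float is then mapped onto one of 256 levels. The sketch below reproduces that
// arithmetic locally; armnn::Quantize itself is not shown in this part of the patch.
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    const float min = -1.0f, max = 1.0f;
    const float scale = (max - min) / 255.f;
    const int32_t offset = static_cast<int32_t>((-min * 255.f) / (max - min));   // 127
    const float input = 0.25f;
    const int level = static_cast<int>(std::round(input / scale)) + offset;      // 32 + 127 = 159
    std::printf("scale=%f offset=%d level(%g)=%d\n", scale, static_cast<int>(offset), input, level);
    return 0;
}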
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefFakeQuantizationFloat32Workload : public Float32Workload<FakeQuantizationQueueDescriptor> +{ +public: + using Float32Workload<FakeQuantizationQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefFloorFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefFloorFloat32Workload.cpp new file mode 100644 index 0000000000..4bc7ec4404 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefFloorFloat32Workload.cpp @@ -0,0 +1,29 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefFloorFloat32Workload.hpp" + +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefFloorFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFloorFloat32Workload_Execute"); + + const float* const input = GetInputTensorDataFloat(0, m_Data); + float* const output = GetOutputTensorDataFloat(0, m_Data); + + unsigned int numElements = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements(); + for (unsigned int i = 0; i < numElements; ++i) + { + output[i] = floorf(input[i]); + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefFloorFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefFloorFloat32Workload.hpp new file mode 100644 index 0000000000..1eb5e2ff7b --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefFloorFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefFloorFloat32Workload : public Float32Workload<FloorQueueDescriptor> +{ +public: + using Float32Workload<FloorQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.cpp new file mode 100644 index 0000000000..6fe203e5f0 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.cpp @@ -0,0 +1,37 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefFullyConnectedFloat32Workload.hpp" + +#include "FullyConnected.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefFullyConnectedFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedFloat32Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + float* outputData = GetOutputTensorDataFloat(0, m_Data); + const float* inputData = GetInputTensorDataFloat(0, m_Data); + const float* weightData = m_Data.m_Weight->GetConstTensor<float>(); + const float* biasData = m_Data.m_Parameters.m_BiasEnabled ? 
m_Data.m_Bias->GetConstTensor<float>() : nullptr; + + FullyConnected(inputData, + outputData, + inputInfo, + outputInfo, + weightData, + biasData, + m_Data.m_Parameters.m_TransposeWeightMatrix); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.hpp new file mode 100644 index 0000000000..cb835bd2ce --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefFullyConnectedFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefFullyConnectedFloat32Workload : public Float32Workload<FullyConnectedQueueDescriptor> +{ +public: + using Float32Workload<FullyConnectedQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp new file mode 100644 index 0000000000..0186d3f5e5 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefFullyConnectedUint8Workload.cpp @@ -0,0 +1,60 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefFullyConnectedUint8Workload.hpp" + +#include "FullyConnected.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefFullyConnectedUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedUint8Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + const uint8_t* weightData = m_Data.m_Weight->GetConstTensor<uint8_t>(); + + auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo); + + auto weight = Dequantize(weightData, m_Data.m_Weight->GetTensorInfo()); + + std::vector<float> results(inputInfo.GetNumElements()); + + if (m_Data.m_Parameters.m_BiasEnabled) + { + const int32_t* biasData = m_Data.m_Bias->GetConstTensor<int32_t>(); + auto bias = Dequantize(biasData, m_Data.m_Bias->GetTensorInfo()); + + FullyConnected(dequant.data(), + results.data(), + inputInfo, + outputInfo, + weight.data(), + bias.data(), + m_Data.m_Parameters.m_TransposeWeightMatrix); + } + else + { + FullyConnected(dequant.data(), + results.data(), + inputInfo, + outputInfo, + weight.data(), + nullptr, + m_Data.m_Parameters.m_TransposeWeightMatrix); + } + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefFullyConnectedUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefFullyConnectedUint8Workload.hpp new file mode 100644 index 0000000000..cd14ea85e0 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefFullyConnectedUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefFullyConnectedUint8Workload : public Uint8Workload<FullyConnectedQueueDescriptor> +{ +public: + using Uint8Workload<FullyConnectedQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.cpp new file mode 100644 index 0000000000..82c1ecd32e --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.cpp @@ -0,0 +1,61 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefL2NormalizationFloat32Workload.hpp" + +#include "RefWorkloadUtils.hpp" +#include "TensorBufferArrayView.hpp" + +#include "Profiling.hpp" + +#include <cmath> + +namespace armnn +{ + +void RefL2NormalizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationFloat32Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + TensorBufferArrayView<const float> input(inputInfo.GetShape(), GetInputTensorDataFloat(0, m_Data)); + TensorBufferArrayView<float> output(outputInfo.GetShape(), GetOutputTensorDataFloat(0, m_Data)); + + const unsigned int batchSize = inputInfo.GetShape()[0]; + const unsigned int depth = inputInfo.GetShape()[1]; + const unsigned int rows = inputInfo.GetShape()[2]; + const unsigned int cols = inputInfo.GetShape()[3]; + + for (unsigned int n = 0; n < batchSize; ++n) + { + for (unsigned int d = 0; d < depth; ++d) + { + for (unsigned int h = 0; h < rows; ++h) + { + for (unsigned int w = 0; w < cols; ++w) + { + float reduction = 0.0; + for (unsigned int c = 0; c < depth; ++c) + { + const float value = input.Get(n, c, h, w); + reduction += value * value; + } + + // Using std::max(reduction, epsilon) below would prevent against division by 0. + // However, at the time of writing: + // - This is not supported by the ACL functions used to implement L2Normalization in the CL + // backend. + // - The reference semantics for this operator do not include this parameter. + const float scale = 1.0f / sqrtf(reduction); + output.Get(n, d, h, w) = input.Get(n, d, h, w) * scale; + } + } + } + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.hpp new file mode 100644 index 0000000000..a2420279f5 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefL2NormalizationFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
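// The loops above implement, for every spatial position (n, h, w),
//     out[c] = in[c] / sqrt( sum_k in[k]^2 )
// i.e. L2 normalisation across the channel dimension. A one-position check (a sketch
// with made-up values):
#include <cmath>
#include <cstdio>

int main()
{
    const float in[2] = {3.f, 4.f};                                    // two channels at one (n,h,w)
    const float norm = std::sqrt(in[0] * in[0] + in[1] * in[1]);       // 5
    std::printf("%.1f %.1f\n", in[0] / norm, in[1] / norm);            // 0.6 0.8
    return 0;
}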
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefL2NormalizationFloat32Workload : public Float32Workload<L2NormalizationQueueDescriptor> +{ +public: + using Float32Workload<L2NormalizationQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefMergerFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefMergerFloat32Workload.cpp new file mode 100644 index 0000000000..41d3c05d4b --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefMergerFloat32Workload.cpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefMergerFloat32Workload.hpp" + +#include "Merger.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefMergerFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMergerFloat32Workload_Execute"); + Merger<float>(m_Data); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefMergerFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefMergerFloat32Workload.hpp new file mode 100644 index 0000000000..d894c2a2ca --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefMergerFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefMergerFloat32Workload : public Float32Workload<MergerQueueDescriptor> +{ +public: + using Float32Workload<MergerQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefMergerUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefMergerUint8Workload.cpp new file mode 100644 index 0000000000..3f4371b628 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefMergerUint8Workload.cpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefMergerUint8Workload.hpp" + +#include "Merger.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefMergerUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMergerUint8Workload_Execute"); + Merger<uint8_t>(m_Data); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefMergerUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefMergerUint8Workload.hpp new file mode 100644 index 0000000000..4c9bbcac50 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefMergerUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefMergerUint8Workload : public Uint8Workload<MergerQueueDescriptor> +{ +public: + using Uint8Workload<MergerQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp new file mode 100644 index 0000000000..ed68b1f6db --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.cpp @@ -0,0 +1,28 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefMultiplicationFloat32Workload.hpp" + +#include "Multiplication.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefMultiplicationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMultiplicationFloat32Workload_Execute"); + + const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]); + + float* outputData = GetOutputTensorDataFloat(0, m_Data); + const float* inputData0 = GetInputTensorDataFloat(0, m_Data); + const float* inputData1 = GetInputTensorDataFloat(1, m_Data); + Multiplication(inputData0, inputData1, inputInfo0.GetNumElements(), outputData); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp new file mode 100644 index 0000000000..920d072836 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefMultiplicationFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefMultiplicationFloat32Workload : public Float32Workload<MultiplicationQueueDescriptor> +{ +public: + using Float32Workload<MultiplicationQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp new file mode 100644 index 0000000000..2e6f0e6c8b --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.cpp @@ -0,0 +1,38 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
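+// Like the other quantized reference workloads, uint8 multiplication goes through the float path:
+// both inputs are dequantized, the element-wise Multiplication kernel runs on the float data, and
+// the result is quantized back using the output tensor info.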
+// + +#include "RefMultiplicationUint8Workload.hpp" + +#include "Multiplication.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefMultiplicationUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefMultiplicationUint8Workload_Execute"); + + const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0); + auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1); + + std::vector<float> results(outputInfo.GetNumElements()); + Multiplication(dequant0.data(), + dequant1.data(), + inputInfo0.GetNumElements(), + results.data()); + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.hpp new file mode 100644 index 0000000000..5da2e581eb --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefMultiplicationUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefMultiplicationUint8Workload : public Uint8Workload<MultiplicationQueueDescriptor> +{ +public: + using Uint8Workload<MultiplicationQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.cpp new file mode 100644 index 0000000000..c743207423 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.cpp @@ -0,0 +1,185 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
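+// The helpers below implement local brightness normalization (Krizhevsky 2012):
+// out = in / (kappa + alpha * accumulated_scale)^beta, where accumulated_scale is the sum of
+// squared inputs over a window of radius normSize/2, taken either spatially within a channel
+// ("Within") or over neighbouring channels at the same spatial position ("Across").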
+// + +#include "RefNormalizationFloat32Workload.hpp" + +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <armnn/Tensor.hpp> + +#include <boost/log/trivial.hpp> +#include <boost/numeric/conversion/cast.hpp> + +namespace armnn +{ + +// Helper function to compute "Within" normalization using Krichevsky 2012: Local Brightness Normalization +static void NormalizeWithinUingLbr(const float* inputData, + float* outputData, + const TensorShape& tensorShape, + uint32_t norm_size, + float alpha, + float beta, + float kappa) +{ + const unsigned int batchSize = tensorShape[0]; + const unsigned int depth = tensorShape[1]; + const unsigned int rows = tensorShape[2]; + const unsigned int cols = tensorShape[3]; + + int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */ + + for (unsigned int n = 0; n < batchSize; n++) + { + for (unsigned int c = 0; c < depth; c++) + { + for (unsigned int h = 0; h < rows; h++) + { + for (unsigned int w = 0; w < cols; w++) + { + float accumulated_scale = 0.0; + for (int y = -radius; y <= radius; y++) + { + for (int x = -radius; x <= radius; x++) + { + int i = boost::numeric_cast<int>(w) + x; + int j = boost::numeric_cast<int>(h) + y; + + if ((i < 0) || (i >= boost::numeric_cast<int>(cols))) + { + continue; + } + + if ((j < 0) || (j >= boost::numeric_cast<int>(rows))) + { + continue; + } + + float inval = inputData[n * cols * rows * depth + + c * cols * rows + + boost::numeric_cast<unsigned int>(j) * cols + + boost::numeric_cast<unsigned int>(i)]; + + accumulated_scale += inval*inval; + } + } + outputData[n * cols * rows * depth + + c * cols * rows + + h * cols + + w] = inputData[n * cols * rows * depth + + c * cols * rows + + h * cols + + w] / (powf((kappa + (accumulated_scale * alpha)), beta)); + } + } + } + } +} + +// Helper function to compute "Across" normalization using Krichevsky 2012: Local Brightness Normalization +void NormalizeAcrossUingLbr(const float* inputData, + float* outputData, + const TensorShape& tensorShape, + uint32_t norm_size, + float alpha, + float beta, + float kappa) +{ + const unsigned int batchSize = tensorShape[0]; + const unsigned int depth = tensorShape[1]; + const unsigned int rows = tensorShape[2]; + const unsigned int cols = tensorShape[3]; + + int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */ + + for (unsigned int n = 0; n < batchSize; n++) + { + for (unsigned int c = 0; c < depth; c++) + { + for (unsigned int h = 0; h < rows; h++) + { + for (unsigned int w = 0; w < cols; w++) + { + float accumulated_scale = 0.0; + for (int z = -radius; z <= radius; z++) + { + int k = boost::numeric_cast<int>(c) + z; + + if ((k < 0) || (k >= boost::numeric_cast<int>(depth))) + { + continue; + } + + float inval = inputData[n * cols * rows * depth + + boost::numeric_cast<unsigned int>(k) * cols * rows + + h * cols + + w]; + + accumulated_scale += inval*inval; + } + float scale = kappa + (accumulated_scale * alpha); + scale = powf(scale, -beta); + outputData[n * cols * rows * depth + + c * cols * rows + + h * cols + + w] = scale * + inputData[n * cols * rows * depth + + c * cols * rows + + h * cols + + w]; + } + } + } + } +} + +void RefNormalizationFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationFloat32Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + + float* outputData = GetOutputTensorDataFloat(0, m_Data); + const float* inputData = 
GetInputTensorDataFloat(0, m_Data); + + + if (NormalizationAlgorithmMethod::LocalBrightness == m_Data.m_Parameters.m_NormMethodType) + { + if (NormalizationAlgorithmChannel::Within == m_Data.m_Parameters.m_NormChannelType) + { + NormalizeWithinUingLbr(inputData, + outputData, + inputInfo.GetShape(), + m_Data.m_Parameters.m_NormSize, + m_Data.m_Parameters.m_Alpha, + m_Data.m_Parameters.m_Beta, + m_Data.m_Parameters.m_K); + } + else if (NormalizationAlgorithmChannel::Across == m_Data.m_Parameters.m_NormChannelType) + { + NormalizeAcrossUingLbr(inputData, + outputData, + inputInfo.GetShape(), + m_Data.m_Parameters.m_NormSize, + m_Data.m_Parameters.m_Alpha, + m_Data.m_Parameters.m_Beta, + m_Data.m_Parameters.m_K); + } + else + { + BOOST_LOG_TRIVIAL(warning) << "Illegal NORMALIZATION mode in normalization_f32"; + return; + } + } + else + { + BOOST_LOG_TRIVIAL(warning) << "Lcr method (Jarret 2009: Local Contrast Normalization) not supported yet."; + return; + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.hpp new file mode 100644 index 0000000000..6f4175ae35 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefNormalizationFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefNormalizationFloat32Workload : public Float32Workload<NormalizationQueueDescriptor> +{ +public: + using Float32Workload<NormalizationQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefPermuteWorkload.cpp b/src/armnn/backends/RefWorkloads/RefPermuteWorkload.cpp new file mode 100644 index 0000000000..b2bb8fbf3d --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefPermuteWorkload.cpp @@ -0,0 +1,31 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefPermuteWorkload.hpp" +#include "RefWorkloadUtils.hpp" + +#include <Permute.hpp> + +namespace armnn +{ + +template <armnn::DataType DataType> +void RefPermuteWorkload<DataType>::Execute() const +{ + using T = ResolveType<DataType>; + + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, GetName() + "_Execute"); + + const ITensorHandle* src = m_Data.m_Inputs[0]; + const ITensorHandle* dst = m_Data.m_Outputs[0]; + const PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings; + + armnnUtils::Permute(GetTensorInfo(dst).GetShape(), mappings, GetConstCpuData<T>(src), GetCpuData<T>(dst)); +} + +template class RefPermuteWorkload<DataType::Float32>; +template class RefPermuteWorkload<DataType::QuantisedAsymm8>; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefPermuteWorkload.hpp b/src/armnn/backends/RefWorkloads/RefPermuteWorkload.hpp new file mode 100644 index 0000000000..4ca1f38588 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefPermuteWorkload.hpp @@ -0,0 +1,33 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
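+// The RefPermuteWorkload template declared below forwards to armnnUtils::Permute with the
+// destination shape and the m_DimMappings permutation vector; it is instantiated for Float32 and
+// QuantisedAsymm8 via the aliases at the end of this header.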
+// + +#pragma once + +#include "backends/Workload.hpp" + +#include <armnn/TypesUtils.hpp> + +namespace armnn +{ + +template <armnn::DataType DataType> +class RefPermuteWorkload : public TypedWorkload<PermuteQueueDescriptor, DataType> +{ +public: + static const std::string& GetName() + { + static const std::string name = std::string("RefPermute") + GetDataTypeName(DataType) + "Workload"; + return name; + } + + using TypedWorkload<PermuteQueueDescriptor, DataType>::m_Data; + using TypedWorkload<PermuteQueueDescriptor, DataType>::TypedWorkload; + void Execute() const override; +}; + +using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>; +using RefPermuteUint8Workload = RefPermuteWorkload<DataType::QuantisedAsymm8>; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.cpp new file mode 100644 index 0000000000..030f96c892 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.cpp @@ -0,0 +1,33 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefPooling2dFloat32Workload.hpp" + +#include "Pooling2d.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefPooling2dFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPooling2dFloat32Workload_Execute"); + + const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo0 = GetTensorInfo(m_Data.m_Outputs[0]); + + float* outputData = GetOutputTensorDataFloat(0, m_Data); + const float* inputData = GetInputTensorDataFloat(0, m_Data); + + Pooling2d(inputData, + outputData, + inputInfo0, + outputInfo0, + m_Data.m_Parameters); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.hpp new file mode 100644 index 0000000000..598b365a17 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefPooling2dFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefPooling2dFloat32Workload : public Float32Workload<Pooling2dQueueDescriptor> +{ +public: + using Float32Workload<Pooling2dQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefPooling2dUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefPooling2dUint8Workload.cpp new file mode 100644 index 0000000000..7066fc910b --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefPooling2dUint8Workload.cpp @@ -0,0 +1,37 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
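+// uint8 pooling also follows the dequantize / compute-in-float / requantize pattern: the input is
+// dequantized, the shared Pooling2d kernel runs on the float data, and the result is quantized
+// back using the output tensor info.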
+// + +#include "RefPooling2dUint8Workload.hpp" + +#include "Pooling2d.hpp" +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefPooling2dUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPooling2dUint8Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo); + + std::vector<float> results(outputInfo.GetNumElements()); + Pooling2d(dequant.data(), + results.data(), + inputInfo, + outputInfo, + m_Data.m_Parameters); + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefPooling2dUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefPooling2dUint8Workload.hpp new file mode 100644 index 0000000000..cbeca2c41d --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefPooling2dUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefPooling2dUint8Workload : public Uint8Workload<Pooling2dQueueDescriptor> +{ +public: + using Uint8Workload<Pooling2dQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.cpp new file mode 100644 index 0000000000..3bf7b48622 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.cpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefReshapeFloat32Workload.hpp" + +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <cstring> + +namespace armnn +{ + +void RefReshapeFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReshapeFloat32Workload_Execute"); + + void* output = GetOutputTensorData<void>(0, m_Data); + const void* input = GetInputTensorData<void>(0, m_Data); + unsigned int numBytes = GetTensorInfo(m_Data.m_Inputs[0]).GetNumBytes(); + memcpy(output, input, numBytes); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.hpp new file mode 100644 index 0000000000..36fdf7f812 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefReshapeFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
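+// Reshape only changes tensor metadata, so the workload declared below simply copies the input
+// buffer into the output buffer with a memcpy of the input's byte size.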
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefReshapeFloat32Workload : public Float32Workload<ReshapeQueueDescriptor> +{ +public: + using Float32Workload<ReshapeQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.cpp new file mode 100644 index 0000000000..38742607cd --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.cpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefReshapeUint8Workload.hpp" + +#include "RefWorkloadUtils.hpp" + +#include "Profiling.hpp" + +#include <cstring> + +namespace armnn +{ + +void RefReshapeUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReshapeUint8Workload_Execute"); + + void* output = GetOutputTensorData<void>(0, m_Data); + const void* input = GetInputTensorData<void>(0, m_Data); + unsigned int numBytes = GetTensorInfo(m_Data.m_Inputs[0]).GetNumBytes(); + memcpy(output, input, numBytes); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.hpp new file mode 100644 index 0000000000..38da277bd2 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefReshapeUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefReshapeUint8Workload : public Uint8Workload<ReshapeQueueDescriptor> +{ +public: + using Uint8Workload<ReshapeQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.cpp new file mode 100644 index 0000000000..8ad7a76298 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.cpp @@ -0,0 +1,29 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefResizeBilinearFloat32Workload.hpp" + +#include "RefWorkloadUtils.hpp" +#include "ResizeBilinear.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefResizeBilinearFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeBilinearFloat32Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + ResizeBilinear(GetInputTensorDataFloat(0, m_Data), + inputInfo, + GetOutputTensorDataFloat(0, m_Data), + outputInfo); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.hpp new file mode 100644 index 0000000000..86e8693b91 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefResizeBilinearFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefResizeBilinearFloat32Workload : public Float32Workload<ResizeBilinearQueueDescriptor> +{ +public: + using Float32Workload<ResizeBilinearQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.cpp new file mode 100644 index 0000000000..dfa561db6d --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.cpp @@ -0,0 +1,33 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefResizeBilinearUint8Workload.hpp" + +#include "RefWorkloadUtils.hpp" +#include "ResizeBilinear.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefResizeBilinearUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeBilinearUint8Workload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo); + + std::vector<float> results(outputInfo.GetNumElements()); + ResizeBilinear(dequant.data(), inputInfo, results.data(), outputInfo); + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.hpp new file mode 100644 index 0000000000..f72fafda4f --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefResizeBilinearUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefResizeBilinearUint8Workload : public Uint8Workload<ResizeBilinearQueueDescriptor> +{ +public: + using Uint8Workload<ResizeBilinearQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.cpp new file mode 100644 index 0000000000..590e514d3d --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.cpp @@ -0,0 +1,26 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "RefSoftmaxFloat32Workload.hpp" + +#include "RefWorkloadUtils.hpp" +#include "Softmax.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefSoftmaxFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxFloat32Workload_Execute"); + + Softmax(GetInputTensorDataFloat(0, m_Data), + GetOutputTensorDataFloat(0, m_Data), + GetTensorInfo(m_Data.m_Inputs[0]), + m_Data.m_Parameters.m_Beta); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.hpp new file mode 100644 index 0000000000..4d30f9fa3f --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefSoftmaxFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefSoftmaxFloat32Workload : public Float32Workload<SoftmaxQueueDescriptor> +{ +public: + using Float32Workload<SoftmaxQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.cpp new file mode 100644 index 0000000000..5ef4a6da92 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.cpp @@ -0,0 +1,36 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefSoftmaxUint8Workload.hpp" + +#include "RefWorkloadUtils.hpp" +#include "Softmax.hpp" + +#include "Profiling.hpp" + +#include <vector> + +namespace armnn +{ + +void RefSoftmaxUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSoftmaxUint8Workload_Execute"); + + const TensorInfo& tensorInfo = GetTensorInfo(m_Data.m_Inputs[0]); + + auto dequant = Dequantize(GetInputTensorDataU8(0, m_Data), tensorInfo); + + std::vector<float> results(tensorInfo.GetNumElements()); + + Softmax(dequant.data(), + results.data(), + tensorInfo, + m_Data.m_Parameters.m_Beta); + + Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), GetTensorInfo(m_Data.m_Outputs[0])); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.hpp new file mode 100644 index 0000000000..fadc764e0a --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefSoftmaxUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefSoftmaxUint8Workload : public Uint8Workload<SoftmaxQueueDescriptor> +{ +public: + using Uint8Workload<SoftmaxQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.cpp new file mode 100644 index 0000000000..35ab4e22ef --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.cpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "RefSplitterFloat32Workload.hpp" + +#include "Splitter.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefSplitterFloat32Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSplitterFloat32Workload_Execute"); + Splitter<float>(m_Data); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.hpp new file mode 100644 index 0000000000..722dde129c --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefSplitterFloat32Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefSplitterFloat32Workload : public Float32Workload<SplitterQueueDescriptor> +{ +public: + using Float32Workload<SplitterQueueDescriptor>::Float32Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.cpp new file mode 100644 index 0000000000..522a4463dd --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.cpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "RefSplitterUint8Workload.hpp" + +#include "Splitter.hpp" + +#include "Profiling.hpp" + +namespace armnn +{ + +void RefSplitterUint8Workload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSplitterUint8Workload_Execute"); + Splitter<uint8_t>(m_Data); +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.hpp new file mode 100644 index 0000000000..e28554951b --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefSplitterUint8Workload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/Workload.hpp" +#include "backends/WorkloadData.hpp" + +namespace armnn +{ + +class RefSplitterUint8Workload : public Uint8Workload<SplitterQueueDescriptor> +{ +public: + using Uint8Workload<SplitterQueueDescriptor>::Uint8Workload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/RefWorkloadUtils.hpp b/src/armnn/backends/RefWorkloads/RefWorkloadUtils.hpp new file mode 100644 index 0000000000..088fe819e5 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/RefWorkloadUtils.hpp @@ -0,0 +1,125 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "backends/CpuTensorHandle.hpp" + +#include <armnn/Tensor.hpp> +#include <armnn/Types.hpp> + +#include <boost/polymorphic_cast.hpp> + +namespace armnn +{ + +//////////////////////////////////////////// +/// float32 helpers +//////////////////////////////////////////// + +inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle) +{ + // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate. 
+ const ConstCpuTensorHandle* cpuTensorHandle = + boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle); + return cpuTensorHandle->GetTensorInfo(); +} + +template <typename DataType> +inline const DataType* GetConstCpuData(const ITensorHandle* tensorHandle) +{ + // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate. + const ConstCpuTensorHandle* cpuTensorHandle = + boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle); + return cpuTensorHandle->GetConstTensor<DataType>(); +} + +template <typename DataType> +inline DataType* GetCpuData(const ITensorHandle* tensorHandle) +{ + // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate. + const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle); + return cpuTensorHandle->GetTensor<DataType>(); +}; + +template <typename DataType, typename PayloadType> +const DataType* GetInputTensorData(unsigned int idx, const PayloadType& data) +{ + const ITensorHandle* tensorHandle = data.m_Inputs[idx]; + return GetConstCpuData<DataType>(tensorHandle); +} + +template <typename DataType, typename PayloadType> +DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data) +{ + const ITensorHandle* tensorHandle = data.m_Outputs[idx]; + return GetCpuData<DataType>(tensorHandle); +} + +template <typename PayloadType> +const float* GetInputTensorDataFloat(unsigned int idx, const PayloadType& data) +{ + return GetInputTensorData<float>(idx, data); +} + +template <typename PayloadType> +float* GetOutputTensorDataFloat(unsigned int idx, const PayloadType& data) +{ + return GetOutputTensorData<float>(idx, data); +} + +//////////////////////////////////////////// +/// u8 helpers +//////////////////////////////////////////// + +inline const uint8_t* GetConstCpuU8Data(const ITensorHandle* tensorHandle) +{ + // We know that reference workloads use (Const)CpuTensorHandles only, so this cast is legitimate. + const ConstCpuTensorHandle* cpuTensorHandle = + boost::polymorphic_downcast<const ConstCpuTensorHandle*>(tensorHandle); + return cpuTensorHandle->GetConstTensor<uint8_t>(); +}; + +inline uint8_t* GetCpuU8Data(const ITensorHandle* tensorHandle) +{ + // We know that reference workloads use CpuTensorHandles only, so this cast is legitimate. 
+ const CpuTensorHandle* cpuTensorHandle = boost::polymorphic_downcast<const CpuTensorHandle*>(tensorHandle); + return cpuTensorHandle->GetTensor<uint8_t>(); +}; + +template <typename PayloadType> +const uint8_t* GetInputTensorDataU8(unsigned int idx, const PayloadType& data) +{ + const ITensorHandle* tensorHandle = data.m_Inputs[idx]; + return GetConstCpuU8Data(tensorHandle); +} + +template <typename PayloadType> +uint8_t* GetOutputTensorDataU8(unsigned int idx, const PayloadType& data) +{ + const ITensorHandle* tensorHandle = data.m_Outputs[idx]; + return GetCpuU8Data(tensorHandle); +} + +template<typename T> +std::vector<float> Dequantize(const T* quant, const TensorInfo& info) +{ + std::vector<float> ret(info.GetNumElements()); + for (size_t i = 0; i < info.GetNumElements(); i++) + { + ret[i] = armnn::Dequantize(quant[i], info.GetQuantizationScale(), info.GetQuantizationOffset()); + } + return ret; +} + +inline void Quantize(uint8_t* quant, const float* dequant, const TensorInfo& info) +{ + for (size_t i = 0; i < info.GetNumElements(); i++) + { + quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset()); + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/ResizeBilinear.cpp b/src/armnn/backends/RefWorkloads/ResizeBilinear.cpp new file mode 100644 index 0000000000..7b386ed467 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/ResizeBilinear.cpp @@ -0,0 +1,92 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ResizeBilinear.hpp" + +#include "TensorBufferArrayView.hpp" + +#include <boost/numeric/conversion/cast.hpp> + +#include <cmath> +#include <algorithm> + +namespace armnn +{ + +namespace +{ + +inline float Lerp(float a, float b, float w) +{ + return w * b + (1.f - w) * a; +} + +} + +void ResizeBilinear(const float* in, const TensorInfo& inputInfo, float* out, const TensorInfo& outputInfo) +{ + // We follow the definition of TensorFlow and AndroidNN: The top-left corner of a texel in the output + // image is projected into the input image to figure out the interpolants and weights. Note that this + // will yield different results than if projecting the centre of output texels. 
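+    // For each output pixel (y, x) the corresponding input coordinates are iy = y * scaleY and
+    // ix = x * scaleX; the four texels surrounding (floor(iy), floor(ix)) are blended by lerping
+    // along rows y0 and y1 with weight ix - floor(ix), then lerping those two results with weight
+    // iy - floor(iy).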
+ + const unsigned int batchSize = inputInfo.GetShape()[0]; + const unsigned int channelCount = inputInfo.GetShape()[1]; + + const unsigned int inputHeight = inputInfo.GetShape()[2]; + const unsigned int inputWidth = inputInfo.GetShape()[3]; + const unsigned int outputHeight = outputInfo.GetShape()[2]; + const unsigned int outputWidth = outputInfo.GetShape()[3]; + + // How much to scale pixel coordinates in the output image to get the corresponding pixel coordinates + // in the input image + const float scaleY = boost::numeric_cast<float>(inputHeight) / boost::numeric_cast<float>(outputHeight); + const float scaleX = boost::numeric_cast<float>(inputWidth) / boost::numeric_cast<float>(outputWidth); + + TensorBufferArrayView<const float> input(inputInfo.GetShape(), in); + TensorBufferArrayView<float> output(outputInfo.GetShape(), out); + + for (unsigned int n = 0; n < batchSize; ++n) + { + for (unsigned int c = 0; c < channelCount; ++c) + { + for (unsigned int y = 0; y < outputHeight; ++y) + { + // Corresponding real-valued height coordinate in input image + const float iy = boost::numeric_cast<float>(y) * scaleY; + + // Discrete height coordinate of top-left texel (in the 2x2 texel area used for interpolation) + const float fiy = floorf(iy); + const unsigned int y0 = boost::numeric_cast<unsigned int>(fiy); + + // Interpolation weight (range [0,1]) + const float yw = iy - fiy; + + for (unsigned int x = 0; x < outputWidth; ++x) + { + // Real-valued and discrete width coordinates in input image + const float ix = boost::numeric_cast<float>(x) * scaleX; + const float fix = floorf(ix); + const unsigned int x0 = boost::numeric_cast<unsigned int>(fix); + + // Interpolation weight (range [0,1]) + const float xw = ix - fix; + + // Discrete width/height coordinates of texels below and to the right of (x0, y0) + const unsigned int x1 = std::min(x0 + 1, inputWidth - 1u); + const unsigned int y1 = std::min(y0 + 1, inputHeight - 1u); + + // Interpolation + const float ly0 = Lerp(input.Get(n, c, y0, x0), input.Get(n, c, y0, x1), xw); // lerp along row y0 + const float ly1 = Lerp(input.Get(n, c, y1, x0), input.Get(n, c, y1, x1), xw); // lerp along row y1 + const float l = Lerp(ly0, ly1, yw); + + output.Get(n, c, y, x) = l; + } + } + } + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/ResizeBilinear.hpp b/src/armnn/backends/RefWorkloads/ResizeBilinear.hpp new file mode 100644 index 0000000000..50e8128d18 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/ResizeBilinear.hpp @@ -0,0 +1,15 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +void ResizeBilinear(const float* in, const TensorInfo& inputInfo, float* out, const TensorInfo& outputInfo); + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Softmax.cpp b/src/armnn/backends/RefWorkloads/Softmax.cpp new file mode 100644 index 0000000000..58840e3076 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Softmax.cpp @@ -0,0 +1,49 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
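+// Numerically stable softmax over the channel dimension: for each batch entry the channel maximum
+// is subtracted before exponentiation, so out[c] = exp(beta * (in[c] - max)) / sum_k exp(beta * (in[k] - max)).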
+// + +#include "Softmax.hpp" + +#include <cmath> +#include <vector> + +namespace armnn +{ + +/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo +void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float beta) +{ + unsigned int numChannels = tensorInfo.GetShape()[1]; + for (unsigned int n = 0; n < tensorInfo.GetShape()[0]; n++) + { + // find maximum channel + float max = in[n * numChannels]; + for (unsigned int c = 1; c < numChannels; c++) + { + float val = in[n * numChannels + c]; + if (val > max) + { + max = val; + } + } + + // exponentiate all values and sum + std::vector<float> exponentials(numChannels); + float sum = 0.0f; + for (unsigned int c = 0; c < numChannels; c++) + { + float val = in[n * numChannels + c]; + exponentials[c] = expf((val - max) * beta); + sum += exponentials[c]; + } + + // divide exponentials by sum to give outputs + for (unsigned int c = 0; c < numChannels; c++) + { + out[n * numChannels + c] = exponentials[c] / sum; + } + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Softmax.hpp b/src/armnn/backends/RefWorkloads/Softmax.hpp new file mode 100644 index 0000000000..c508ab2b82 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Softmax.hpp @@ -0,0 +1,16 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo +void Softmax(const float* in, float* out, const TensorInfo& tensorInfo, float beta); + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/Splitter.hpp b/src/armnn/backends/RefWorkloads/Splitter.hpp new file mode 100644 index 0000000000..67f6c100f9 --- /dev/null +++ b/src/armnn/backends/RefWorkloads/Splitter.hpp @@ -0,0 +1,83 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
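+// Splitter copies the single input tensor into the requested output views: for every input element
+// it derives the multi-dimensional index, tests that index against each view's origin and extent
+// (taken from the corresponding output tensor's shape), and writes the element to the matching
+// offset in that output.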
+// + +#pragma once + +#include "RefWorkloadUtils.hpp" + +#include "backends/WorkloadData.hpp" + +#include <armnn/Tensor.hpp> + +#include <boost/assert.hpp> + +namespace armnn +{ + +template <typename DataType> +void Splitter(const SplitterQueueDescriptor& data) +{ + const TensorInfo& inputInfo0 = GetTensorInfo(data.m_Inputs[0]); + + for (unsigned int index = 0; index < inputInfo0.GetNumElements(); ++index) + { + unsigned int indices[MaxNumOfTensorDimensions]; + + unsigned int indexRemainder = index; + unsigned int dimensionStride = inputInfo0.GetNumElements(); + + for (unsigned int i = 0; i<inputInfo0.GetNumDimensions(); i++) + { + dimensionStride /= inputInfo0.GetShape()[i]; + indices[i] = indexRemainder / dimensionStride; // use integer division to round down + indexRemainder -= indices[i] * dimensionStride; + } + + for (unsigned int viewIdx = 0; viewIdx < data.m_ViewOrigins.size(); ++viewIdx) + { + SplitterQueueDescriptor::ViewOrigin const& view = data.m_ViewOrigins[viewIdx]; + + //split view extents are defined by the size of (the corresponding) input tensor + const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]); + + // check all dimensions to see if this element is inside the given input view + bool insideView = true; + for (unsigned int i = 0; i<outputInfo.GetNumDimensions(); i++) + { + if (indices[i] < view.m_Origin[i]) + { + insideView = false; + } + if (indices[i] >= view.m_Origin[i] + outputInfo.GetShape()[i]) + { + insideView = false; + } + } + + if (insideView) + { + unsigned int outIndex = 0; + unsigned int dimensionStride = 1; + + for (unsigned int i = outputInfo.GetNumDimensions(); i-- > 0;) + { + outIndex += dimensionStride * (indices[i] - view.m_Origin[i]); + dimensionStride *= outputInfo.GetShape()[i]; + } + + //we are within the view, copy input data to the output corresponding to this view + DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data); + BOOST_ASSERT(outputData); + + const DataType* inputData = GetInputTensorData<DataType>(0, data); + BOOST_ASSERT(inputData); + + outputData[outIndex] = inputData[index]; + } + } + } +} + +} //namespace armnn diff --git a/src/armnn/backends/RefWorkloads/TensorBufferArrayView.hpp b/src/armnn/backends/RefWorkloads/TensorBufferArrayView.hpp new file mode 100644 index 0000000000..3994c1f1de --- /dev/null +++ b/src/armnn/backends/RefWorkloads/TensorBufferArrayView.hpp @@ -0,0 +1,42 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
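+// TensorBufferArrayView wraps a raw NCHW buffer and exposes Get(b, c, h, w), computing the linear
+// offset b*C*H*W + c*H*W + h*W + w with BOOST_ASSERT bounds checks on each index.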
+// + +#include <armnn/Tensor.hpp> + +#include <boost/assert.hpp> + +namespace armnn +{ + +// Utility class providing access to raw tensor memory based on indices along each dimension +template <typename DataType> +class TensorBufferArrayView +{ +public: + TensorBufferArrayView(const TensorShape& shape, DataType* data) + : m_Shape(shape) + , m_Data(data) + { + } + + DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const + { + BOOST_ASSERT( b < m_Shape[0] || (m_Shape[0] == 0 && b == 0) ); + BOOST_ASSERT( c < m_Shape[1] || (m_Shape[1] == 0 && c == 0) ); + BOOST_ASSERT( h < m_Shape[2] || (m_Shape[2] == 0 && h == 0) ); + BOOST_ASSERT( w < m_Shape[3] || (m_Shape[3] == 0 && w == 0) ); + + return m_Data[b * m_Shape[1] * m_Shape[2] * m_Shape[3] + + c * m_Shape[2] * m_Shape[3] + + h * m_Shape[3] + + w]; + } + +private: + const TensorShape m_Shape; + DataType* m_Data; +}; + +} //namespace armnn diff --git a/src/armnn/backends/Workload.hpp b/src/armnn/backends/Workload.hpp new file mode 100644 index 0000000000..dbc7574d0e --- /dev/null +++ b/src/armnn/backends/Workload.hpp @@ -0,0 +1,80 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "WorkloadData.hpp" +#include "WorkloadInfo.hpp" +#include <algorithm> +#include "Profiling.hpp" + +namespace armnn +{ + +// Workload interface to enqueue a layer computation +class IWorkload +{ +public: + virtual ~IWorkload(){}; + + virtual void Execute() const = 0; +}; + +// NullWorkload used to denote an unsupported workload when used by the MakeWorkload<> template +// in the various workload factories. +// There should never be an instantiation of a NullWorkload. +class NullWorkload : public IWorkload +{ + NullWorkload()=delete; +}; + +template <typename QueueDescriptor> +class BaseWorkload : public IWorkload +{ +public: + + BaseWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info) + : m_Data(descriptor) + { + m_Data.Validate(info); + } + + const QueueDescriptor& GetData() const { return m_Data; } + +protected: + const QueueDescriptor m_Data; +}; + +template <typename QueueDescriptor, armnn::DataType DataType> +class TypedWorkload : public BaseWorkload<QueueDescriptor> +{ +public: + + TypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info) + : BaseWorkload<QueueDescriptor>(descriptor, info) + { + BOOST_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(), + info.m_InputTensorInfos.end(), + [&](auto it){ + return it.GetDataType() == DataType; + }), + "Trying to create workload with incorrect type"); + BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(), + info.m_OutputTensorInfos.end(), + [&](auto it){ + return it.GetDataType() == DataType; + }), + "Trying to create workload with incorrect type"); + } + + static constexpr armnn::DataType ms_DataType = DataType; +}; + +template <typename QueueDescriptor> +using Float32Workload = TypedWorkload<QueueDescriptor, armnn::DataType::Float32>; + +template <typename QueueDescriptor> +using Uint8Workload = TypedWorkload<QueueDescriptor, armnn::DataType::QuantisedAsymm8>; + +} //namespace armnn diff --git a/src/armnn/backends/WorkloadData.cpp b/src/armnn/backends/WorkloadData.cpp new file mode 100644 index 0000000000..96a37802f1 --- /dev/null +++ b/src/armnn/backends/WorkloadData.cpp @@ -0,0 +1,753 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "WorkloadData.hpp" + +#include "CpuTensorHandle.hpp" +#include "WorkloadInfo.hpp" + +#include <algorithm> +#include <string> +#include <sstream> +#include <iomanip> + +#include <boost/format.hpp> + +namespace armnn +{ + +//--------------------------------------------------------------- +DataType GetBiasDataType(DataType inputDataType) +{ + switch (inputDataType) + { + case DataType::Float32: + return DataType::Float32; + case DataType::QuantisedAsymm8: + return DataType::Signed32; + default: + BOOST_ASSERT_MSG(false, "Invalid input data type"); + return DataType::Float32; + } +} + +namespace +{ + +//--------------------------------------------------------------- +//android ndk does not support std::to_string function. +template <typename T> +std::string to_string(T value) +{ + std::ostringstream os; + os << value; + return os.str(); +} + +//--------------------------------------------------------------- +void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName) +{ + if (!ptr) + { + throw InvalidArgumentException(descName + ": Invalid null pointer. The " + + paramName + " parameter must be set."); + } +} + +//--------------------------------------------------------------- +void ValidateTensorShapesMatch(const TensorInfo& first, + const TensorInfo& second, + std::string const& descName, + std::string const& firstName, + std::string const& secondName) +{ + if (first.GetShape() != second.GetShape()) + { + throw InvalidArgumentException(descName + ": " + + firstName + " & " + secondName + " must have identical shapes"); + } +} + +//--------------------------------------------------------------- +void ValidateNoInputs(const WorkloadInfo& workloadInfo, std::string const& descName) +{ + if (workloadInfo.m_InputTensorInfos.size() != 0) + { + throw InvalidArgumentException(descName + + ": Requires no inputs. " + + to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided."); + } +} + +//--------------------------------------------------------------- +void ValidateSingleInput(const WorkloadInfo& workloadInfo, std::string const& descName) +{ + if (workloadInfo.m_InputTensorInfos.size() != 1) + { + throw InvalidArgumentException(descName + + ": Requires exactly one input. " + + to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided." ); + } +} + +//--------------------------------------------------------------- +void ValidateTwoInputs(const WorkloadInfo& workloadInfo, std::string const& descName) +{ + if (workloadInfo.m_InputTensorInfos.size() != 2) + { + throw InvalidArgumentException(descName + + ": Requires exactly two workloadInfo.m_InputTensorInfos. " + + to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided."); + } +} + +//--------------------------------------------------------------- +void ValidateSingleOutput(const WorkloadInfo& workloadInfo, std::string const& descName) +{ + if (workloadInfo.m_OutputTensorInfos.size() != 1) + { + throw InvalidArgumentException(descName + + ": Requires exactly one output. 
" + + to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided."); + } +} + +//--------------------------------------------------------------- +void ValidateTensorNumDimensions(const TensorInfo& tensor, + std::string const& descName, + unsigned int numDimensions, + std::string const& tensorName) +{ + if (tensor.GetNumDimensions() != numDimensions) + { + throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " + + to_string(tensor.GetNumDimensions()) + " dimensions for " + + tensorName + " tensor."); + } +} + +//--------------------------------------------------------------- +void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType, + const std::string& descName, std::string const& tensorName) +{ + if (tensor.GetDataType() != dataType) + { + throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " + + GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor."); + } +} + +//--------------------------------------------------------------- +void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo, + const TensorInfo& weightsTensorInfo, const std::string& descName) +{ + if (biasTensor.GetQuantizationOffset() != 0) + { + throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " + + to_string(biasTensor.GetQuantizationOffset())); + } + const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale(); + if (biasTensor.GetQuantizationScale() != expectedScale) + { + // Print the float values with extra precision to see very small differences + std::stringstream msg; + msg << std::setprecision(10) << descName << ": Expected " << expectedScale << + " quantization scale for bias tensor (the product of the input and weight scales), but got " << + biasTensor.GetQuantizationScale(); + throw InvalidArgumentException(msg.str()); + } +} + +//--------------------------------------------------------------- +void ValidateTensors(const std::vector<ITensorHandle*>& vec, + unsigned int numExpected, + const std::string& descName, + const std::string& varName) +{ + if (vec.empty() && numExpected > 0) + { + throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array."); + } + + for (unsigned int i = 0; i < numExpected; ++i) + { + if (!vec[i]) + { + throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i)); + } + } +} + +//--------------------------------------------------------------- +void ValidateBroadcastTensorShapesMatch(const TensorInfo& first, + const TensorInfo& second, + const TensorInfo& output, + std::string const& descName, + std::string const& firstName, + std::string const& secondName) +{ + // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get + // broadcasted. 
+ if (first.GetNumDimensions() != second.GetNumDimensions()) + { + throw InvalidArgumentException(descName + ": Tensors " + + firstName + " & " + secondName + + " must have the same number of dimensions in order to be broadcasted"); + } + uint32_t numDims = first.GetNumDimensions(); + std::vector<uint32_t> outputDims(numDims, 0u); + for (uint32_t i = 0; i < numDims; i++) + { + const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i]; + const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1); + if (dimsNotEqual && dimsNotOne) + { + throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes"); + } + outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]); + } + TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data()); + if (broadcastShape != output.GetShape()) + { + throw InvalidArgumentException(descName + ": The tensor shape resulting from adding " + + firstName + " & " + secondName + + " does not match the output shape"); + } +} + +//--------------------------------------------------------------- +/// Validates that the output tensor's quantization scale is greater than the product +/// of the two input tensors' quantization scales. This is a requirement of the implementation of +/// the quantized multiplication. +void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2, + const TensorInfo& outputTensorInfo, std::string const& descName, + const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName) +{ + if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8) + { + if (outputTensorInfo.GetQuantizationScale() <= + inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale()) + { + std::stringstream msg; + msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " << + "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors"; + throw InvalidArgumentException(msg.str()); + } + } +} + +} //namespace + +void QueueDescriptor::ValidateInputsOutputs(const std::string& descName, + unsigned int numExpectedIn, unsigned int numExpectedOut) const +{ + ValidateTensors(m_Inputs, numExpectedIn, descName, "input"); + ValidateTensors(m_Outputs, numExpectedOut, descName, "output"); +} + +//--------------------------------------------------------------- +void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "MemCopyQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "MemCopyQueueDescriptor"); + + if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size()) + { + throw InvalidArgumentException(boost::str( + boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)") + % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size())); + } + + for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i) + { + if (workloadInfo.m_InputTensorInfos[i].GetNumElements() != + workloadInfo.m_OutputTensorInfos[i].GetNumElements()) + { + throw InvalidArgumentException(boost::str( + boost::format("Number of elements for tensor input and output %1% does not match") + % i )); + } + } + + if (m_Inputs.size() != m_Outputs.size()) + { + throw InvalidArgumentException(boost::str( + boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)") 
+ % m_Inputs.size() % m_Outputs.size())); + } + + for (unsigned int i = 0; i < m_Inputs.size(); ++i) + { + if (!m_Inputs[i]) + { + throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i)); + } + + if (!m_Outputs[i]) + { + throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i)); + } + } +} + +//--------------------------------------------------------------- +void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "ActivationQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "ActivationQueueDescriptor"); + ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_OutputTensorInfos[0], + "ActivationQueueDescriptor", + "input", + "output"); +} + +//--------------------------------------------------------------- +void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "SoftmaxQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "SoftmaxQueueDescriptor"); + ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "input"); + ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "output"); + + ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_OutputTensorInfos[0], + "SoftmaxQueueDescriptor", + "input", + "output"); +} + +//--------------------------------------------------------------- +void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "SplitterQueueDescriptor"); + + if (workloadInfo.m_OutputTensorInfos.size() <= 0) + { + throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided."); + } + + if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size()) + { + throw InvalidArgumentException( + "SplitterQueueDescriptor: Number of split windows " + "has to match number of workloadInfo.m_OutputTensorInfos. " + "Number of windows: " + + to_string(m_ViewOrigins.size()) + + ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size())); + } + + //the dimensionality of all the windows has to match the dimensionality (not shape) of the input + std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions(); + for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w ) + { + //check that the dimensionality of input is same as the split windows + ViewOrigin const& e = m_ViewOrigins[w]; + if (e.m_Origin.size() != inputDims) + { + throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to " + "have the same dimensionality as the input tensor. 
" + "Window origin (index: " + + to_string(w) + ") has " + to_string(e.m_Origin.size()) + + " dimensions, the input " + "tensor has " + + to_string(inputDims) + " dimensions."); + } + for (unsigned int i = 0; i < e.m_Origin.size(); ++i) + { + if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] > + workloadInfo.m_InputTensorInfos[0].GetShape()[i]) + { + throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to " + "be smaller or equal than the size of the input in that coord."); + } + } + } +} + +//--------------------------------------------------------------- +void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleOutput(workloadInfo, "MergerQueueDescriptor"); + + if (m_Inputs.size() <= 0) + { + throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided."); + } + if (m_Outputs.size() <= 0) + { + throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided."); + } + + if (workloadInfo.m_InputTensorInfos.size() <= 0) + { + throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided."); + } + if (workloadInfo.m_OutputTensorInfos.size() <= 0) + { + throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided."); + } + + if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size()) + { + throw InvalidArgumentException( + "MergerQueueDescriptor: Number of split windows " + "has to match number of workloadInfo.m_InputTensorInfos. " + "Number of windows: " + + to_string(m_ViewOrigins.size()) + + ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size())); + } + + //the dimensionality of all the windows has to match the dimensionality (not shape) of the output + std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions(); + for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w ) + { + //check that the dimensionality of output is same as the split windows + ViewOrigin const& e = m_ViewOrigins[w]; + if (e.m_Origin.size() != outputDims) + { + throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to " + "have the same dimensionality as the output tensor. 
" + "Window origin (index: " + + to_string(w) + ") has " + to_string(e.m_Origin.size()) + + " dimensions, the output " + "tensor has " + + to_string(outputDims) + " dimensions."); + } + //check that the merge windows are within the output tensor + for (unsigned int i = 0; i < e.m_Origin.size(); ++i) + { + if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i] + > workloadInfo.m_OutputTensorInfos[0].GetShape()[i]) + { + throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to " + "be smaller or equal than the size of the output in that coord."); + } + } + } +} + +//--------------------------------------------------------------- +void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "FullyConnectedQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "FullyConnectedQueueDescriptor"); + ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output"); + + if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 || + workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4)) + { + throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions."); + } + + if (m_Weight == nullptr) + { + throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing."); + } + + ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight"); + + if (m_Parameters.m_BiasEnabled) + { + if (m_Bias == nullptr) + { + throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but " + "bias value tensor descriptor is missing."); + } + + // validate type and quantization values + ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(), + workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor"); + + ValidateTensorDataType(m_Bias->GetTensorInfo(), + GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()), + "FullyConnectedQueueDescriptor", "bias"); + + ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias"); + } + + ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), + workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output"); +} + +//--------------------------------------------------------------- +void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "NormalizationQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "NormalizationQueueDescriptor"); + ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_OutputTensorInfos[0], + "NormalizationQueueDescriptor", + "input", + "output"); +} + +void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateTwoInputs(workloadInfo, "AdditionQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "AdditionQueueDescriptor"); + + ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_InputTensorInfos[1], + workloadInfo.m_OutputTensorInfos[0], + "AdditionQueueDescriptor", + "first input", + "second input"); + +} + +//--------------------------------------------------------------- +void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateTwoInputs(workloadInfo, "MultiplicationQueueDescriptor"); + 
ValidateSingleOutput(workloadInfo, "MultiplicationQueueDescriptor"); + ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_InputTensorInfos[1], + "MultiplicationQueueDescriptor", + "first input", + "second input"); + ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_OutputTensorInfos[0], + "MultiplicationQueueDescriptor", + "input", + "output"); +} + +void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "BatchNormalizationQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "BatchNormalizationQueueDescriptor"); + ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_OutputTensorInfos[0], + "BatchNormalizationQueueDescriptor", + "input", + "output"); + ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean"); + ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance"); + ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta"); + ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma"); + + + ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean"); + ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance"); + ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta"); + ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma"); + + ValidateTensorShapesMatch( + m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance"); + ValidateTensorShapesMatch( + m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta"); + ValidateTensorShapesMatch( + m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma"); +} + +void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "Convolution2dQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "Convolution2dQueueDescriptor"); + + ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input"); + ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output"); + + ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight"); + ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight"); + ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(), + "Convolution2dQueueDescriptor", "weight"); + if (m_Parameters.m_BiasEnabled) + { + ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias"); + ValidateTensorDataType(m_Bias->GetTensorInfo(), + GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()), + "Convolution2dQueueDescriptor", "bias"); + ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(), + workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor"); + } + + ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), + workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output"); +} + +void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, 
"DepthwiseConvolution2dQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor"); + + ValidateTensorNumDimensions( + workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input"); + ValidateTensorNumDimensions( + workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output"); + + ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight"); + ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight"); + + //inputChannels * channelMultiplier should be equal to outputChannels + const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0]; + const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1]; + const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[1]; + if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels) + { + throw InvalidArgumentException( + boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be " + "equal to input_channels (provided %2%) multiplied by channel_multiplier " + "(provided %3%).") + % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier)); + } + + if (m_Parameters.m_BiasEnabled) + { + ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias"); + ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias"); + ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(), + workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor"); + + ValidateTensorDataType(m_Bias->GetTensorInfo(), + GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()), + "DepthwiseConvolution2dQueueDescriptor", "bias"); + } + + ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), + workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output"); +} + +void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "PermuteQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "PermuteQueueDescriptor"); + + const PermutationVector& mapping = m_Parameters.m_DimMappings; + + const TensorInfo& input = workloadInfo.m_InputTensorInfos[0]; + const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0]; + + ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input"); + ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output"); + + for (unsigned int i = 0; i < mapping.GetSize(); ++i) + { + if (input.GetShape()[i] != output.GetShape()[mapping[i]]) + { + throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) + + " (=" + to_string(input.GetShape()[i]) + ") " + + "must match dst dimension " + to_string(mapping[i]) + + " (=" + to_string(output.GetShape()[mapping[i]]) + ")"); + } + } +} + +void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "Pooling2dQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "Pooling2dQueueDescriptor"); + + ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input"); + ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output"); +} + 
+void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "ResizeBilinearQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "ResizeBilinearQueueDescriptor"); + + ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input"); + ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output"); + + // Resize bilinear only changes width and height: batch and channel count must match + { + const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0]; + const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0]; + if (inputBatchSize != outputBatchSize) + { + throw InvalidArgumentException( + boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) " + "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize)); + } + } + + { + const unsigned int inputChannelCount = workloadInfo.m_InputTensorInfos[0].GetShape()[1]; + const unsigned int outputChannelCount = workloadInfo.m_OutputTensorInfos[0].GetShape()[1]; + if (inputChannelCount != outputChannelCount) + { + throw InvalidArgumentException( + boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) " + "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount)); + } + } +} + +void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "FakeQuantizationQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "FakeQuantizationQueueDescriptor"); + + ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input"); + ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output"); + ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_OutputTensorInfos[0], + "FakeQuantizationQueueDescriptor", + "input", + "output"); + if (m_Parameters.m_Min > m_Parameters.m_Max) + { + throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max"); + } + +} + +void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "L2NormalizationQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "L2NormalizationQueueDescriptor"); + + ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input"); + ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output"); + ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_OutputTensorInfos[0], + "L2NormalizationQueueDescriptor", + "input", + "output"); +} + +void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateNoInputs(workloadInfo, "ConstantQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "ConstantQueueDescriptor"); + + if (!m_LayerOutput) + { + throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified"); + } + + ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), + workloadInfo.m_OutputTensorInfos[0], + "ConstantQueueDescriptor", + "constant", + "output"); +} + +void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "ReshapeQueueDescriptor"); + ValidateSingleOutput(workloadInfo, 
"ReshapeQueueDescriptor"); + + if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + { + throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " + + to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " + + to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements."); + } +} + +void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateSingleInput(workloadInfo, "FloorQueueDescriptor"); + ValidateSingleOutput(workloadInfo, "FlootQueueDescriptor"); + + if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0]) + { + throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match."); + } +} + +} //namespace armnn diff --git a/src/armnn/backends/WorkloadData.hpp b/src/armnn/backends/WorkloadData.hpp new file mode 100644 index 0000000000..7f8713582f --- /dev/null +++ b/src/armnn/backends/WorkloadData.hpp @@ -0,0 +1,252 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "WorkloadDataFwd.hpp" + +#include "armnn/Types.hpp" +#include "armnn/Tensor.hpp" +#include "armnn/Descriptors.hpp" +#include "armnn/Exceptions.hpp" +#include "InternalTypes.hpp" +#include "OutputHandler.hpp" +#include "CpuTensorHandleFwd.hpp" + +namespace armnn +{ + +//a helper function that returns the bias data type required for given input data type. +DataType GetBiasDataType(DataType inputDataType); + +struct WorkloadInfo; + +struct QueueDescriptor +{ + std::vector<ITensorHandle*> m_Inputs; + std::vector<ITensorHandle*> m_Outputs; + + void ValidateInputsOutputs(const std::string& descName, + unsigned int numExpectedIn, unsigned int numExpectedOut) const; + + +protected: + ~QueueDescriptor() = default; + QueueDescriptor() = default; + QueueDescriptor(QueueDescriptor const&) = default; + QueueDescriptor& operator=(QueueDescriptor const&) = default; +}; + +// Base class for queue descriptors which contain parameters +template <typename LayerDescriptor> +struct QueueDescriptorWithParameters : public QueueDescriptor +{ + LayerDescriptor m_Parameters; + +protected: + ~QueueDescriptorWithParameters() = default; + QueueDescriptorWithParameters() = default; + QueueDescriptorWithParameters(QueueDescriptorWithParameters const&) = default; + QueueDescriptorWithParameters& operator=(QueueDescriptorWithParameters const&) = default; +}; + +struct MemCopyQueueDescriptor : QueueDescriptor +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +using InputQueueDescriptor = MemCopyQueueDescriptor; +using OutputQueueDescriptor = MemCopyQueueDescriptor; + +// Softmax layer workload data +struct SoftmaxQueueDescriptor : QueueDescriptorWithParameters<SoftmaxDescriptor> +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Splitter layer workload data +struct SplitterQueueDescriptor : QueueDescriptorWithParameters<ViewsDescriptor> +{ + struct ViewOrigin + { + ViewOrigin() {} + ViewOrigin(std::vector<unsigned int> const& origin) : m_Origin(origin) {} + + //view origin (size of the vector is the same as number of dimensions of the view) + std::vector<unsigned int> m_Origin; + }; + + //view defines a tensor that will be carved from the input tensor. + //view origins are stored here, the extents are defined by sizes of the output tensors. 
+ std::vector<ViewOrigin> m_ViewOrigins; + + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Merger layer workload data +struct MergerQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor> +{ + struct ViewOrigin + { + ViewOrigin() {} + ViewOrigin(const std::vector<unsigned int>& origin) : m_Origin(origin) {} + + //view origin (size of the vector is the same as number of dimensions of the view) + std::vector<unsigned int> m_Origin; + }; + + //view defines a sub-area of the output tensor that will be filled with the corresponding input tensor. + //view origins are stored here, the extents are defined by sizes of the input tensors. + std::vector<ViewOrigin> m_ViewOrigins; + + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Activation layer workload data +struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor> +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Fully connected layer workload data +struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnectedDescriptor> +{ + FullyConnectedQueueDescriptor() + : m_Weight(nullptr) + , m_Bias(nullptr) + { + } + + const ConstCpuTensorHandle* m_Weight; + const ConstCpuTensorHandle* m_Bias; + + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Permute layer workload data +struct PermuteQueueDescriptor : QueueDescriptorWithParameters<PermuteDescriptor> +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Pooling 2D layer workload data +struct Pooling2dQueueDescriptor : QueueDescriptorWithParameters<Pooling2dDescriptor> +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Convolution 2D layer workload data +struct Convolution2dQueueDescriptor : QueueDescriptorWithParameters<Convolution2dDescriptor> +{ + Convolution2dQueueDescriptor() + : m_Weight(nullptr) + , m_Bias(nullptr) + { + } + + const ConstCpuTensorHandle* m_Weight; + const ConstCpuTensorHandle* m_Bias; + + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Depthwise Convolution 2D layer workload data +struct DepthwiseConvolution2dQueueDescriptor : QueueDescriptorWithParameters<DepthwiseConvolution2dDescriptor> +{ + DepthwiseConvolution2dQueueDescriptor() + : m_Weight(nullptr) + , m_Bias(nullptr) + { + } + + const ConstCpuTensorHandle* m_Weight; + const ConstCpuTensorHandle* m_Bias; + + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Normalization layer workload data +struct NormalizationQueueDescriptor : QueueDescriptorWithParameters<NormalizationDescriptor> +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Add layer workload data +struct AdditionQueueDescriptor : QueueDescriptor +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Multiplication layer workload data +struct MultiplicationQueueDescriptor : QueueDescriptor +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +// Batch norm layer workload data +struct BatchNormalizationQueueDescriptor : QueueDescriptorWithParameters<BatchNormalizationDescriptor> +{ + BatchNormalizationQueueDescriptor() + : m_Mean(nullptr) + , m_Variance(nullptr) + , m_Beta(nullptr) + , m_Gamma(nullptr) + { + } + + const ConstCpuTensorHandle* m_Mean; + const ConstCpuTensorHandle* m_Variance; + const ConstCpuTensorHandle* m_Beta; + const ConstCpuTensorHandle* m_Gamma; + + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +struct ResizeBilinearQueueDescriptor : 
QueueDescriptorWithParameters<ResizeBilinearDescriptor> +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +struct FakeQuantizationQueueDescriptor : QueueDescriptorWithParameters<FakeQuantizationDescriptor> +{ + FakeQuantizationQueueDescriptor() + : m_Min(nullptr) + , m_Max(nullptr) + { + } + + const ConstCpuTensorHandle* m_Min; + const ConstCpuTensorHandle* m_Max; + + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +struct L2NormalizationQueueDescriptor : QueueDescriptor +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +struct ConstantQueueDescriptor : QueueDescriptor +{ + ConstantQueueDescriptor() + : m_LayerOutput(nullptr) + { + } + + const ConstCpuTensorHandle* m_LayerOutput; + + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +struct ReshapeQueueDescriptor : QueueDescriptorWithParameters<ReshapeDescriptor> +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +struct FloorQueueDescriptor : QueueDescriptor +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + +} //namespace armnn diff --git a/src/armnn/backends/WorkloadDataCollector.hpp b/src/armnn/backends/WorkloadDataCollector.hpp new file mode 100644 index 0000000000..4dfd0ea5f4 --- /dev/null +++ b/src/armnn/backends/WorkloadDataCollector.hpp @@ -0,0 +1,36 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/Tensor.hpp> + +#include <vector> + +namespace armnn +{ +class ITensorHandle; + +class WorkloadDataCollector +{ +public: + WorkloadDataCollector(std::vector<ITensorHandle*>& handles, std::vector<TensorInfo>& infos) + : m_Handles(handles) + , m_Infos(infos) + { + } + + void Push(ITensorHandle* handle, const TensorInfo& info) + { + m_Handles.push_back(handle); + m_Infos.push_back(info); + } + +private: + std::vector<ITensorHandle*>& m_Handles; + std::vector<TensorInfo>& m_Infos; +}; + + +} //namespace armnn diff --git a/src/armnn/backends/WorkloadDataFwd.hpp b/src/armnn/backends/WorkloadDataFwd.hpp new file mode 100644 index 0000000000..1b466b69ca --- /dev/null +++ b/src/armnn/backends/WorkloadDataFwd.hpp @@ -0,0 +1,27 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ + +struct QueueDescriptor; +template <typename LayerDescriptor> +struct QueueDescriptorWithParameters; +struct SoftmaxQueueDescriptor; +struct SplitterQueueDescriptor; +struct MergerQueueDescriptor; +struct ActivationQueueDescriptor; +struct FullyConnectedQueueDescriptor; +struct PermuteQueueDescriptor; +struct Pooling2dQueueDescriptor; +struct Convolution2dQueueDescriptor; +struct NormalizationQueueDescriptor; +struct MultiplicationQueueDescriptor; +struct BatchNormalizationQueueDescriptor; +struct FakeQuantizationQueueDescriptor; +struct ReshapeQueueDescriptor; + +} // namespace armnn
\ No newline at end of file diff --git a/src/armnn/backends/WorkloadFactory.cpp b/src/armnn/backends/WorkloadFactory.cpp new file mode 100644 index 0000000000..32634a6d0f --- /dev/null +++ b/src/armnn/backends/WorkloadFactory.cpp @@ -0,0 +1,214 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "WorkloadFactory.hpp" +#include "RefWorkloadFactory.hpp" +#include "NeonWorkloadFactory.hpp" +#include "ClWorkloadFactory.hpp" + +#include "armnn/Types.hpp" +#include "armnn/LayerSupport.hpp" +#include "Layer.hpp" +#include "Layers.hpp" +#include "CpuTensorHandle.hpp" + +#include <boost/cast.hpp> +#include <cstring> +#include <boost/iterator/transform_iterator.hpp> + +namespace armnn +{ + +bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, DataType dataType, + std::string& outReasonIfUnsupported) +{ + constexpr size_t reasonCapacity = 1024; + char reason[reasonCapacity]; + bool result; + switch(layer.GetType()) + { + case LayerType::Activation: + { + auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsActivationSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + case LayerType::Addition: + { + const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = IsAdditionSupported(compute, input0, input1, output, reason, reasonCapacity); + break; + } + case LayerType::BatchNormalization: + { + auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsBatchNormalizationSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + case LayerType::Constant: + { + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = IsConstantSupported(compute, output, reason, reasonCapacity); + break; + } + case LayerType::Convolution2d: + { + auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsConvolution2dSupported(compute, input, cLayer->GetParameters(), + cLayer->m_Weight->GetTensorInfo(), reason, reasonCapacity); + break; + } + case LayerType::MemCopy: + { + // MemCopy supported for CpuRef, CpuAcc and GpuAcc backends + // (also treat Undefined as CpuRef to avoid breaking lots of Unit tests) + result = compute == Compute::CpuRef || compute == Compute::Undefined + || compute == Compute::CpuAcc || compute == Compute::GpuAcc; + strcpy(reason, "Unsupported backend type"); + break; + } + case LayerType::DepthwiseConvolution2d: + { + auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsDepthwiseConvolutionSupported(compute, input, cLayer->GetParameters(), + cLayer->m_Weight->GetTensorInfo(), reason, reasonCapacity); + break; + } + case LayerType::FakeQuantization: + { + auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = 
IsFakeQuantizationSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + case LayerType::Floor: + { + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = IsFloorSupported(compute, input, output, reason, reasonCapacity); + break; + } + case LayerType::FullyConnected: + { + auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsFullyConnectedSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + case LayerType::Input: + { + const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo(); + result = IsInputSupported(compute, input, reason, reasonCapacity); + break; + } + case LayerType::L2Normalization: + { + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsL2NormalizationSupported(compute, input, reason, reasonCapacity); + break; + } + case LayerType::Merger: + { + auto cLayer = boost::polymorphic_downcast<const MergerLayer*>(&layer); + + // Get vector of all inputs + auto getTensorInfo = [](const InputSlot& slot) + { + return &slot.GetConnectedOutputSlot()->GetTensorInfo(); + }; + auto begin = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo); + auto end = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo); + + std::vector<const TensorInfo*> inputs(begin, end); + + result = IsMergerSupported(compute, inputs, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + case LayerType::Multiplication: + { + const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); + result = IsMultiplicationSupported(compute, input0, input1, reason, reasonCapacity); + break; + } + case LayerType::Normalization: + { + auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = IsNormalizationSupported(compute, input, output, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + case LayerType::Output: + { + const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsOutputSupported(compute, output, reason, reasonCapacity); + break; + } + case LayerType::Permute: + { + auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = IsPermuteSupported(compute, input, output, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + case LayerType::Pooling2d: + { + auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = IsPooling2dSupported(compute, input, output, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + case LayerType::Reshape: + { + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsReshapeSupported(compute, input, reason, reasonCapacity); + break; + } + case 
LayerType::ResizeBilinear: + { + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsResizeBilinearSupported(compute, input, reason, reasonCapacity); + break; + } + case LayerType::Softmax: + { + auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsSoftmaxSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + case LayerType::Splitter: + { + auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + result = IsSplitterSupported(compute, input, cLayer->GetParameters(), reason, reasonCapacity); + break; + } + default: + { + BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer."); + strcpy(reason, "Unrecognised layer type"); + result = false; + break; + } + } + outReasonIfUnsupported = reason; + return result; +} + +bool IWorkloadFactory::IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported) +{ + return IsLayerSupported(layer.GetComputeDevice(), layer, dataType, outReasonIfUnsupported); +} + +}
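A minimal usage sketch of the static support query defined above; `layer` stands for any armnn::Layer reference obtained from a graph and is assumed here for illustration:

    std::string reason;
    const bool supported = armnn::IWorkloadFactory::IsLayerSupported(
        armnn::Compute::GpuAcc, layer, armnn::DataType::Float32, reason);
    if (!supported)
    {
        // 'reason' now holds a short explanation, e.g. "Unsupported backend type".
    }
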
\ No newline at end of file diff --git a/src/armnn/backends/WorkloadFactory.hpp b/src/armnn/backends/WorkloadFactory.hpp new file mode 100644 index 0000000000..d3f5bfb40f --- /dev/null +++ b/src/armnn/backends/WorkloadFactory.hpp @@ -0,0 +1,105 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Workload.hpp" +#include <memory> +#include "armnn/TensorFwd.hpp" +#include "OutputHandler.hpp" + +namespace armnn +{ + +class Layer; + +// Workload factory interface for compute backends +class IWorkloadFactory +{ +public: + virtual ~IWorkloadFactory() { } + + virtual Compute GetCompute() const = 0; + + static bool IsLayerSupported(Compute compute, const Layer& layer, DataType dataType, + std::string& outReasonIfUnsupported); + static bool IsLayerSupported(const Layer& layer, DataType dataType, std::string& outReasonIfUnsupported); + + virtual bool SupportsSubTensors() const = 0; + + virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent, + TensorShape const& subTensorShape, + unsigned int const* subTensorOrigin + ) const = 0; + + virtual std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0; + + virtual std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d( + const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual 
std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; + + virtual std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor, + const WorkloadInfo& info) const = 0; +}; + +} //namespace armnn
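The interface above is exercised extensively by the unit tests later in this patch; the typical flow is to fill in a queue descriptor and a WorkloadInfo, create the workload, allocate the tensor handles, and execute. A condensed sketch, assuming `factory` is some concrete IWorkloadFactory and reusing the AddInputToWorkload/AddOutputToWorkload and CopyData* helpers from the test code below (`inputData`/`outputData` are assumed raw buffers):

    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo info;

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = factory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = factory.CreateTensorHandle(outputTensorInfo);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sigmoid;

    std::unique_ptr<armnn::IWorkload> workload = factory.CreateActivation(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), inputData);
    workload->Execute();
    CopyDataFromITensorHandle(outputData, outputHandle.get());
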
\ No newline at end of file diff --git a/src/armnn/backends/WorkloadInfo.hpp b/src/armnn/backends/WorkloadInfo.hpp new file mode 100644 index 0000000000..b0a0d2fe0f --- /dev/null +++ b/src/armnn/backends/WorkloadInfo.hpp @@ -0,0 +1,18 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +namespace armnn +{ + +/// Contains information about inputs and outputs to a layer. +/// This is needed at construction of workloads, but are not stored. +struct WorkloadInfo +{ + std::vector<TensorInfo> m_InputTensorInfos; + std::vector<TensorInfo> m_OutputTensorInfos; +}; + +} //namespace armnn diff --git a/src/armnn/backends/test/ActivationFixture.hpp b/src/armnn/backends/test/ActivationFixture.hpp new file mode 100644 index 0000000000..a67a110354 --- /dev/null +++ b/src/armnn/backends/test/ActivationFixture.hpp @@ -0,0 +1,56 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "TensorCopyUtils.hpp" +#include "WorkloadTestUtils.hpp" + +struct ActivationFixture +{ + ActivationFixture() + { + auto boostArrayExtents = boost::extents + [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)] + [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)] + [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)] + [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)]; + output.resize(boostArrayExtents); + outputExpected.resize(boostArrayExtents); + input.resize(boostArrayExtents); + + unsigned int inputShape[] = { batchSize, channels, height, width }; + unsigned int outputShape[] = { batchSize, channels, height, width }; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + input = MakeRandomTensor<float, 4>(inputTensorInfo, 21453); + } + + unsigned int width = 17; + unsigned int height = 29; + unsigned int channels = 2; + unsigned int batchSize = 5; + + boost::multi_array<float, 4> output; + boost::multi_array<float, 4> outputExpected; + boost::multi_array<float, 4> input; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + // parameters used by some of the activation functions + float a = 0.234f; + float b = -12.345f; +}; + + +struct PositiveActivationFixture : public ActivationFixture +{ + PositiveActivationFixture() + { + input = MakeRandomTensor<float, 4>(inputTensorInfo, 2342423, 0.0f, 1.0f); + } +};
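For context, fixtures such as ActivationFixture above are normally pulled into Boost.Test cases so that the tensors and tensor infos become members of the test body; a sketch (the test name is chosen arbitrarily):

    BOOST_FIXTURE_TEST_CASE(SimpleActivationWorkload, ActivationFixture)
    {
        // input, output, inputTensorInfo and outputTensorInfo are fixture members here.
        BOOST_CHECK_EQUAL(input.num_elements(), batchSize * channels * height * width);
    }
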
\ No newline at end of file diff --git a/src/armnn/backends/test/ActivationTestImpl.hpp b/src/armnn/backends/test/ActivationTestImpl.hpp new file mode 100644 index 0000000000..255a00ef0b --- /dev/null +++ b/src/armnn/backends/test/ActivationTestImpl.hpp @@ -0,0 +1,559 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/ArmNN.hpp> +#include <armnn/Tensor.hpp> +#include <armnn/TypesUtils.hpp> +#include <backends/WorkloadInfo.hpp> + +#include "test/TensorHelpers.hpp" +#include "QuantizeHelper.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" +#include "ActivationFixture.hpp" + +#include <algorithm> + +template<typename T> +LayerTestResult<T, 4> BoundedReLuTestCommon(armnn::IWorkloadFactory& workloadFactory, + float upperBound, float lowerBound, + float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, + const std::vector<T>& inputData, const std::vector<T>& outputExpectedData, + unsigned int inputWidth, unsigned int inputHeight, + unsigned int inputChannels, unsigned int inputBatchSize) +{ + unsigned int outputWidth = inputWidth; + unsigned int outputHeight = inputHeight; + unsigned int outputChannels = inputChannels; + unsigned int outputBatchSize = inputBatchSize; + + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::GetDataType<T>()); + + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::GetDataType<T>()); + + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(inputScale); + inputTensorInfo.SetQuantizationOffset(inputOffset); + + outputTensorInfo.SetQuantizationScale(outputScale); + outputTensorInfo.SetQuantizationOffset(outputOffset); + } + + LayerTestResult<T, 4> result(inputTensorInfo); + + auto input = MakeTensor<T, 4>(inputTensorInfo, inputData); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + // Setup bounded ReLu + armnn::ActivationQueueDescriptor descriptor; + armnn::WorkloadInfo workloadInfo; + AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get()); + + descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu; + descriptor.m_Parameters.m_A = upperBound; + descriptor.m_Parameters.m_B = lowerBound; + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + + result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData); + + return result; +} + +LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory& workloadFactory) +{ + unsigned int inputWidth = 4u; + unsigned int inputHeight = 5u; + unsigned int inputChannels = 1u; + unsigned int inputBatchSize = 1; + + std::vector<float> input = std::vector<float>{ + -2.0f, 0.1f, 0.5f, 1.25f, + 0.786f, 0.9875f, -1.5f, 0.384f, + 1.0001f, 3.5f, 7.5f, 0.896f, + 2.126f, 2.0f, 0.3f, 0.15f, + 0.999f, 1.2f, 0.89f, 6.1f, + }; + 
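// Illustrative note (bounds taken from the call at the end of this test, upperBound = 1.0f,
// lowerBound = -1.0f): the manually calculated expected values below are simply the inputs
// clamped into [lowerBound, upperBound], i.e.
//
//   boundedReLu(x) = std::min(upperBound, std::max(lowerBound, x));
//
// so -2.0f -> -1.0f, 1.25f -> 1.0f, 3.5f -> 1.0f, while values already inside the range pass through.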
+ // Calculated manually + std::vector<float> output = std::vector<float>{ + -1.0f, 0.1f, 0.5f, 1.0f, + 0.786f, 0.9875f, -1.0f, 0.384f, + 1.0f, 1.0f, 1.0f, 0.896f, + 1.0f, 1.0f, 0.3f, 0.15f, + 0.999f, 1.0f, 0.89f, 1.0f, + }; + + return BoundedReLuTestCommon(workloadFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output, + inputWidth, inputHeight, inputChannels, inputBatchSize); +} + +LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory& workloadFactory) +{ + unsigned int inputWidth = 4u; + unsigned int inputHeight = 5u; + unsigned int inputChannels = 1u; + unsigned int inputBatchSize = 1; + + std::vector<float> input = std::vector<float>{ + -1.0f, 0.1f, 0.5f, 6.25f, + 0.786f, 5.9875f, -0.5f, 0.384f, + 6.0001f, 3.5f, 7.5f, 0.896f, + 2.126f, 12.0f, 0.3f, 0.15f, + 0.999f, 1.2f, 0.89f, 6.1f, + }; + + // Calculated manually + std::vector<float> output = std::vector<float>{ + 0.0f, 0.1f, 0.5f, 6.0f, + 0.786f, 5.9875f, 0.0f, 0.384f, + 6.0f, 3.5f, 6.0f, 0.896f, + 2.126f, 6.0f, 0.3f, 0.15f, + 0.999f, 1.2f, 0.89f, 6.0f, + }; + + return BoundedReLuTestCommon(workloadFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output, + inputWidth, inputHeight, inputChannels, inputBatchSize); +} + +LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory& workloadFactory) +{ + unsigned int inputWidth = 3u; + unsigned int inputHeight = 2u; + unsigned int inputChannels = 1u; + unsigned int inputBatchSize = 1; + + std::vector<uint8_t> input = std::vector<uint8_t>{ + 51, 124, 28, + 251, 8, 92 + }; + + // Calculated manually + std::vector<uint8_t> output = std::vector<uint8_t>{ + 0, 122, 0, + 255, 0, 58 + }; + + float inputScale = 12.0f / 255.0f; + int32_t inputOffset = 63; + float outputScale = 6.0f / 255.0f; + int32_t outputOffset = 0; + + return BoundedReLuTestCommon(workloadFactory, 6.0f, 0.0f, + inputScale, inputOffset, outputScale, outputOffset, + input, output, + inputWidth, inputHeight, inputChannels, inputBatchSize); +} + +LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory& workloadFactory) +{ + unsigned int inputWidth = 3u; + unsigned int inputHeight = 2u; + unsigned int inputChannels = 1u; + unsigned int inputBatchSize = 1; + + std::vector<uint8_t> input = std::vector<uint8_t>{ + 51, 230, 28, + 251, 8, 92 + }; + + // Calculated manually + std::vector<uint8_t> output = std::vector<uint8_t>{ + 51, 192, 32, + 192, 32, 92 + }; + + int32_t inputOffset = 112; + float inputScale = 0.0125f; + + return BoundedReLuTestCommon(workloadFactory, 1.0f, -1.0f, + inputScale, inputOffset, inputScale, inputOffset, // input/output scale & offset same + input, output, + inputWidth, inputHeight, inputChannels, inputBatchSize); +} + +namespace +{ + +struct BoundedReLuRandomInputTestTraits +{ + constexpr static unsigned int inputHeight = 31u; + constexpr static unsigned int inputWidth = 19u; + constexpr static unsigned int inputChannels = 4u; + constexpr static unsigned int inputBatchSize = 2; + + constexpr static unsigned int outputHeight = inputHeight; + constexpr static unsigned int outputWidth = inputWidth; + constexpr static unsigned int outputChannels = inputChannels; + constexpr static unsigned int outputBatchSize = inputBatchSize; + + static armnn::TensorInfo GetInputTensorInfo() + { + return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + } + + static armnn::TensorInfo GetOutputTensorInfo() + { + return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, 
outputWidth }, + armnn::DataType::Float32); + } +}; + +boost::multi_array<float, 4> BoundedReLuRandomInputTest(armnn::IWorkloadFactory& workloadFactory, + float lowerBound, + float upperBound, + const armnn::ActivationDescriptor& activationDescriptor) +{ + const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo(); + const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo(); + + boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo)); + + // min/max random values passed to MakeRandomTensor are purposely outside of the ReLu range [lowerBound, upperBound] + auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + // Setup bounded ReLu + armnn::ActivationQueueDescriptor descriptor; + armnn::WorkloadInfo workloadInfo; + AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get()); + descriptor.m_Parameters = activationDescriptor; + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get()); + + return output; +} + +} // namespace + +LayerTestResult<float, 4> CompareBoundedReLuTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& otherWorkloadFactory, + float upperBound, + float lowerBound) +{ + LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo()); + + armnn::ActivationDescriptor activationDescriptor; + activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu; + activationDescriptor.m_A = upperBound; + activationDescriptor.m_B = lowerBound; + + result.output = BoundedReLuRandomInputTest(workloadFactory, 0.0f, upperBound, activationDescriptor); + result.outputExpected = BoundedReLuRandomInputTest(otherWorkloadFactory, 0.0f, upperBound, activationDescriptor); + + return result; +} + +template<typename T> +LayerTestResult<T,4> ConstantLinearActivationTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 0.0f, + int32_t qOffset = 0) +{ + unsigned int inputHeight = 20; + unsigned int inputWidth = 17; + unsigned int inputChannels = 3; + unsigned int batchSize = 5; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth}; + + inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>()); + outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + LayerTestResult<T, 4> ret(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + // Do linear activation that should leave tensor unchanged + armnn::ActivationQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Parameters.m_A = 1.0f; + data.m_Parameters.m_B = 0.0f; + data.m_Parameters.m_Function = armnn::ActivationFunction::Linear; + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + // Ensure output equals input + ret.outputExpected = input; + + return ret; +} + +LayerTestResult<float, 4> ConstantLinearActivationTest(armnn::IWorkloadFactory& workloadFactory) +{ + return ConstantLinearActivationTestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return ConstantLinearActivationTestCommon<uint8_t>(workloadFactory, 4.0f, 3); +} + +template<typename T> +LayerTestResult<T, 4> SimpleActivationTest(armnn::IWorkloadFactory& workloadFactory, + armnn::ActivationFunction activationFunction, + float activationParameterA, + float activationParameterB, + float qScale, + int32_t qOffset, + const std::vector<float>& inputData, + const std::vector<float>& outputExpectedData) +{ + constexpr static unsigned int inputWidth = 16u; + constexpr static unsigned int inputHeight = 1u; + constexpr static unsigned int inputChannels = 1u; + constexpr static unsigned int inputBatchSize = 1u; + + constexpr static unsigned int outputWidth = inputWidth; + constexpr static unsigned int outputHeight = inputHeight; + constexpr static unsigned int outputChannels = inputChannels; + constexpr static unsigned int outputBatchSize = inputBatchSize; + + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + LayerTestResult<T, 4> result(inputTensorInfo); + + auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData)); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + // Setup bounded ReLu + armnn::ActivationQueueDescriptor descriptor; + armnn::WorkloadInfo workloadInfo; + AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get()); + + descriptor.m_Parameters.m_Function = activationFunction; + descriptor.m_Parameters.m_A = activationParameterA; + descriptor.m_Parameters.m_B = activationParameterB; + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + + // Calculated manually + result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData)); + + return result; +} + +template<typename T> +LayerTestResult<T, 4> SimpleSigmoidTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset) +{ + std::vector<float> inputData = { + -0.1f, -0.2f, -0.3f, -0.4f, + 0.1f, 0.2f, 0.3f, 0.4f, + -1.0f, -2.0f, -3.0f, -4.0f, + 1.0f, 2.0f, 3.0f, 4.0f + }; + + // Calculate output values for input + auto f = [](float value) + { + return 1.0f / (1.0f + std::exp(-value)); + }; + std::vector<float> outputExpectedData(inputData.size()); + std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f); + + return SimpleActivationTest<T>(workloadFactory, + armnn::ActivationFunction::Sigmoid, + 0.f, + 0.f, + qScale, + qOffset, + inputData, + outputExpectedData); +} + +LayerTestResult<float, 4> SimpleSigmoidTest(armnn::IWorkloadFactory& workloadFactory) +{ + return SimpleSigmoidTestCommon<float>(workloadFactory, 0.0f, 0); +} + +LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return SimpleSigmoidTestCommon<uint8_t>(workloadFactory, 0.1f, 50); +} + +template<typename T> +LayerTestResult<T,4> CompareActivationTestImpl(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::ActivationFunction f, + unsigned int batchSize = 5, + float qScale = 0.0f, + int32_t qOffset = 0) +{ + unsigned int width = 17; + unsigned int height = 29; + unsigned int channels = 2; + + float a = 0.234f; + float b = -12.345f; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int shape[] = {batchSize, channels, height, width}; + + inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>()); + outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + float minVal = -10.f; + if (f == armnn::ActivationFunction::Sqrt) + { + minVal = 0.f; + } + + boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f); + + + LayerTestResult<T,4> ret(outputTensorInfo); + auto boostArrayExtents = boost::extents + [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)] + [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)] + [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)] + [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)]; + ret.output.resize(boostArrayExtents); + ret.outputExpected.resize(boostArrayExtents); + + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ActivationQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Parameters.m_A = a; + data.m_Parameters.m_B = b; + data.m_Parameters.m_Function = f; + + armnn::ActivationQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get()); + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info); + BOOST_ASSERT(workload != nullptr); + std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo); + BOOST_ASSERT(workloadRef != nullptr); + + inputHandle->Allocate(); + outputHandle->Allocate(); + inputHandleRef->Allocate(); + outputHandleRef->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + + workload->Execute(); + workloadRef->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + + return ret; +} + +LayerTestResult<float,4> CompareActivationTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::ActivationFunction f, + unsigned int batchSize) +{ + return CompareActivationTestImpl<float>(workloadFactory, refWorkloadFactory, f, batchSize); +} + +LayerTestResult<uint8_t,4> CompareActivationUint8Test(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::ActivationFunction f) +{ + return CompareActivationTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, f, 5, 0.1f, 50); +} diff --git a/src/armnn/backends/test/ArmComputeCl.cpp b/src/armnn/backends/test/ArmComputeCl.cpp new file mode 100644 index 0000000000..5933cebc80 --- /dev/null +++ b/src/armnn/backends/test/ArmComputeCl.cpp @@ -0,0 +1,269 @@ +// +// Copyright © 2017 Arm Ltd. 
All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> + +#include "test/TensorHelpers.hpp" +#include "LayerTests.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/ClWorkloadFactory.hpp" +#include "backends/ClWorkloadUtils.hpp" +#include "backends/RefWorkloadFactory.hpp" +#include "backends/ClLayerSupport.hpp" +#include "ActivationFixture.hpp" + +#include <arm_compute/core/CL/CLKernelLibrary.h> +#include <arm_compute/runtime/CL/CLScheduler.h> +#include <string> +#include <iostream> + +#include "test/UnitTests.hpp" + +BOOST_AUTO_TEST_SUITE(Compute_ArmComputeCl) +using FactoryType = armnn::ClWorkloadFactory; + +// ============================================================================ +// UNIT tests + +// Activation +ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest) + +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f) + +ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest) +ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest) + +// Fully Connected +ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true) + +ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false) +ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true) + +// Convolution +ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d, SimpleConvolution2d3x5Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquare, SimpleConvolution2d3x3Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest) + +// Depthwise Convolution +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false) + +// Splitter +BOOST_AUTO_TEST_CASE(SimpleSplitter) +{ + armnn::ClWorkloadFactory workloadFactory; + auto testResult = SplitterTest(workloadFactory); + for (unsigned int i = 0; i < testResult.size(); ++i) + { + BOOST_TEST(CompareTensors(testResult[i].output, testResult[i].outputExpected)); + } +} + +BOOST_AUTO_TEST_CASE(SimpleSplitterUint8) +{ + armnn::ClWorkloadFactory workloadFactory; + auto testResult = SplitterUint8Test(workloadFactory); + for (unsigned int i = 0; i < testResult.size(); ++i) + { + BOOST_TEST(CompareTensors(testResult[i].output, testResult[i].outputExpected)); + } +} + +ARMNN_AUTO_TEST_CASE(CopyViaSplitter, CopyViaSplitterTest) +ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, 
CopyViaSplitterUint8Test) + +// Merger +ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest) +ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test) + +// Pooling +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8, + IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8, L2Pooling2dSize3Stride3Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride4, L2Pooling2dSize3Stride4Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8, L2Pooling2dSize3Stride4Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize9, L2Pooling2dSize9Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize9Uint8, L2Pooling2dSize9Uint8Test) + +// Add +ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest) +ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest) + +// Mul +ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest) + +// Batch Norm +ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest) + +ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest) 
+ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest) +ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest) + +// Resize Bilinear +ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest) +ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest) + +// Constant +ARMNN_AUTO_TEST_CASE(Constant, ConstantTest) +ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantTestUint8) + +// Concat +ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest) +ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, Concatenation2dDim1DiffInputDimsUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, Concatenation3dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims, Concatenation3dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test) + +// Floor +ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) + +// Reshape +ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test) +ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test) + +// Permute +ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test) +ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test) + +// ============================================================================ +// COMPARE tests + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32, CompareDepthwiseConvolution2dTest<float>) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8, CompareDepthwiseConvolution2dTest<uint8_t>) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest, + armnn::NormalizationAlgorithmChannel::Within, + armnn::NormalizationAlgorithmMethod::LocalBrightness) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationAcrossWithReference, 
CompareNormalizationTest, + armnn::NormalizationAlgorithmChannel::Across, + armnn::NormalizationAlgorithmMethod::LocalBrightness) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta1WithReference, CompareSoftmaxTest, 1.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta2WithReference, CompareSoftmaxTest, 2.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8, CompareSoftmaxUint8Test, 1.0f) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::Max) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::Average) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithRefUint8, ComparePooling2dUint8Test, + armnn::PoolingAlgorithm::Average) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareL2Pooling2dWithRef, ComparePooling2dTest, armnn::PoolingAlgorithm::L2) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAddition, CompareAdditionTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMultiplicationWithRef, CompareMultiplicationTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareBatchNorm, CompareBatchNormTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareReLu1, CompareBoundedReLuTest, 1.0f, -1.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareReLu6, CompareBoundedReLuTest, 6.0f, 0.0f) + +// ============================================================================ +// FIXTURE tests + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSigmoidActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Sigmoid, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareTanhActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::TanH, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLinearActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Linear, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::ReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::BoundedReLu, 5u) +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReferenceUint8, ActivationFixture, + CompareActivationUint8Test, armnn::ActivationFunction::BoundedReLu) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSoftReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::SoftReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLeakyReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::LeakyReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareAbsActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Abs, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSqrtActivationWithReference, PositiveActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Sqrt, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSquareActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Square, 5u) + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/backends/test/ArmComputeNeon.cpp b/src/armnn/backends/test/ArmComputeNeon.cpp new file mode 100644 index 0000000000..dd8a668940 --- /dev/null +++ b/src/armnn/backends/test/ArmComputeNeon.cpp @@ -0,0 +1,360 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> + +#include "test/TensorHelpers.hpp" +#include "LayerTests.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/NeonLayerSupport.hpp" +#include "backends/NeonWorkloadFactory.hpp" +#include "backends/RefWorkloadFactory.hpp" +#include "backends/test/TensorCopyUtils.hpp" +#include "ActivationFixture.hpp" + +#include "WorkloadTestUtils.hpp" + +#include "test/UnitTests.hpp" + +BOOST_AUTO_TEST_SUITE(Compute_ArmComputeNeon) +using FactoryType = armnn::NeonWorkloadFactory; + +// ============================================================================ +// UNIT tests + +// Convolution +ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d, SimpleConvolution2d3x5Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquare, SimpleConvolution2d3x3Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest) + +namespace +{ + +armnn::Convolution2dDescriptor MakeConv2dDesc(uint32_t strideX, uint32_t strideY, + uint32_t padLeft = 0, uint32_t padRight = 0, uint32_t padTop = 0, uint32_t padBottom = 0) +{ + armnn::Convolution2dDescriptor result; + result.m_StrideX = strideX; + result.m_StrideY = strideY; + result.m_PadLeft = padLeft; + result.m_PadRight = padRight; + result.m_PadTop = padTop; + result.m_PadBottom = padBottom; + result.m_BiasEnabled = true; + return result; +} + +} + +BOOST_AUTO_TEST_CASE(Conv2dUtils) +{ + // the only preferred Neon convolution is 1x1 with padding=0 and stride size {1,2,3} + armnn::TensorShape shape1x1({ 1,1,1,1 }); + armnn::TensorInfo info1x1(shape1x1, armnn::DataType::Float32); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 2))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 3))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 1))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 2))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(2, 3))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 1))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 2))); + BOOST_TEST(armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 3))); + + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(4, 1))); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(4, 5))); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(3, 6))); + + // non zero padding is not preferred for direct convolution + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 1, 0))); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 0, 1))); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info1x1, MakeConv2dDesc(1, 1, 1, 1))); + + // 2x2 filter not preferred for direct convolution + armnn::TensorShape shape2x2({ 1,1,2,2 }); + armnn::TensorInfo info2x2(shape2x2, armnn::DataType::Float32); + BOOST_TEST(!armnn::IsNeonDirectConvolutionPreferred(info2x2, MakeConv2dDesc(1, 1))); +} + 
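// Note (editorial sketch, not part of the ArmNN sources): the Conv2dUtils case above pins down
// when the Neon backend prefers its direct convolution path - a 1x1 filter, zero padding, and
// strides of at most 3 in either dimension. A hypothetical predicate with exactly that behaviour
// is sketched below; it only illustrates the rule the test encodes, not the real
// IsNeonDirectConvolutionPreferred implementation.
bool IsDirectConv2dPreferredSketch(const armnn::TensorInfo& filterInfo,
                                   const armnn::Convolution2dDescriptor& desc)
{
    const armnn::TensorShape& shape = filterInfo.GetShape();        // [N, C, H, W]
    const bool is1x1     = shape[2] == 1 && shape[3] == 1;          // 1x1 filter only
    const bool noPadding = desc.m_PadLeft == 0 && desc.m_PadRight == 0 &&
                           desc.m_PadTop  == 0 && desc.m_PadBottom == 0;
    const bool strideOk  = desc.m_StrideX >= 1 && desc.m_StrideX <= 3 &&
                           desc.m_StrideY >= 1 && desc.m_StrideY <= 3;
    return is1x1 && noPadding && strideOk;
}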
+// Depthwise Convolution +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false) + +namespace +{ + +armnn::DepthwiseConvolution2dDescriptor MakeDepthwiseConv2dDesc(uint32_t strideX, uint32_t strideY, + uint32_t depthMultiplier = 1, uint32_t padLeft = 0, uint32_t padRight = 0, + uint32_t padTop = 0, uint32_t padBottom = 0) +{ + armnn::DepthwiseConvolution2dDescriptor desc; + desc.m_PadLeft = padLeft; + desc.m_PadRight = padRight; + desc.m_PadTop = padTop; + desc.m_PadBottom = padBottom; + desc.m_StrideX = strideX; + desc.m_StrideY = strideY; + desc.m_BiasEnabled = true; + return desc; +} + +} + +BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils) +{ + armnn::TensorInfo inputInfo({ 1, 1, 10, 10 }, armnn::DataType::Float32); + armnn::TensorInfo weightsInfo3x3({ 1, 1, 3, 3 }, armnn::DataType::Float32); + + // Strides supported: 1,2,3 + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(1, 1), weightsInfo3x3)); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(1, 2), weightsInfo3x3)); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(1, 3), weightsInfo3x3)); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(2, 1), weightsInfo3x3)); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(2, 2), weightsInfo3x3)); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(2, 3), weightsInfo3x3)); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(3, 1), weightsInfo3x3)); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(3, 2), weightsInfo3x3)); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(3, 3), weightsInfo3x3)); + + // Unsupported stride + BOOST_TEST(!armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(4, 1), weightsInfo3x3)); + + // Supported weights shape 1x1 + armnn::TensorInfo weightsInfo1x1({ 1, 1, 1, 1 }, armnn::DataType::Float32); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(1, 1), weightsInfo1x1)); + + // Supported shape 2x2 + armnn::TensorInfo weightsInfo2x2({ 1, 1, 2, 2 }, armnn::DataType::Float32); + BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, MakeDepthwiseConv2dDesc(1, 1), weightsInfo2x2)); +} + +// Pooling +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, 
SimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8, L2Pooling2dSize3Stride3Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride4, L2Pooling2dSize3Stride4Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8, L2Pooling2dSize3Stride4Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test) +ARMNN_AUTO_TEST_CASE(L2Pooling2dSize9, L2Pooling2dSize9Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize9Uint8, L2Pooling2dSize9Uint8Test) + +// Ignore padding values for pooling but count padding fields into the divisor +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8, + IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) + +// Activation +ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest) + +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f) + +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f) +ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f) + +ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest) +ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest) + +// Splitter +BOOST_AUTO_TEST_CASE(SimpleSplitter) +{ + armnn::NeonWorkloadFactory workloadFactory; + auto testResult = SplitterTest(workloadFactory); + for (unsigned int i = 0; i < testResult.size(); ++i) + { + BOOST_TEST(CompareTensors(testResult[i].output, testResult[i].outputExpected)); + } +} + +BOOST_AUTO_TEST_CASE(SimpleSplitterUint8) +{ + armnn::NeonWorkloadFactory workloadFactory; + auto testResult = SplitterUint8Test(workloadFactory); + for (unsigned int i = 0; i < testResult.size(); ++i) + { + BOOST_TEST(CompareTensors(testResult[i].output, testResult[i].outputExpected)); + } +} + +ARMNN_AUTO_TEST_CASE(CopyViaSplitter, 
CopyViaSplitterTest) +ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test) + +// Merger +ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest) +ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test) + +// Fully Connected +ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false) +ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true) +ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false) +ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true) + +// Add +ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest) +ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest) + +// Mul +ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest) + +// Batch Norm +ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest) + +// Constant +ARMNN_AUTO_TEST_CASE(Constant, ConstantTest) +ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantTestUint8) + +// Concatenation +ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest) +ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, Concatenation2dDim1DiffInputDimsUint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, Concatenation3dDim0Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test) + +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims, Concatenation3dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test) + +// L2 Normalization +ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest); +ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest); +ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest); +ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest); + +// Floor +ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) + +// Reshape +ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test) +ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test) + +// Permute +ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test) +ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, 
SimplePermuteUint8Test) +// ============================================================================ +// COMPARE tests + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32, CompareDepthwiseConvolution2dTest<float>) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8, CompareDepthwiseConvolution2dTest<uint8_t>) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest, + armnn::NormalizationAlgorithmChannel::Within, + armnn::NormalizationAlgorithmMethod::LocalBrightness) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationAcrossWithReference, CompareNormalizationTest, + armnn::NormalizationAlgorithmChannel::Across, + armnn::NormalizationAlgorithmMethod::LocalBrightness) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithReference, ComparePooling2dTest, armnn::PoolingAlgorithm::Max) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMaxPooling2dWithReferenceUint8, ComparePooling2dUint8Test, + armnn::PoolingAlgorithm::Max) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithReference, ComparePooling2dTest, + armnn::PoolingAlgorithm::Average) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAveragePooling2dWithReferenceUint8, ComparePooling2dUint8Test, + armnn::PoolingAlgorithm::Average) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareL2Pooling2dWithReference, ComparePooling2dTest, armnn::PoolingAlgorithm::L2) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(UNSUPPORTED_CompareL2Pooling2dWithReferenceUint8, ComparePooling2dUint8Test, + armnn::PoolingAlgorithm::L2) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta1WithReference, CompareSoftmaxTest, 1.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxBeta2WithReference, CompareSoftmaxTest, 2.0f) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8Beta1WithReference, CompareSoftmaxUint8Test, 1.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareSoftmaxUint8Beta2WithReference, CompareSoftmaxUint8Test, 2.0f) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareAddition, CompareAdditionTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareMultiplicationWithReference, CompareMultiplicationTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareBatchNorm, CompareBatchNormTest) + +ARMNN_COMPARE_REF_AUTO_TEST_CASE(ReLu1, CompareBoundedReLuTest, 1.0f, -1.0f) +ARMNN_COMPARE_REF_AUTO_TEST_CASE(ReLu6, CompareBoundedReLuTest, 6.0f, 0.0f) + +// ============================================================================ +// FIXTURE tests + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSigmoidActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Sigmoid, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareTanhActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::TanH, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLinearActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Linear, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::ReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::BoundedReLu, 5u) +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareBoundedReLuActivationWithReferenceUint8, ActivationFixture, + CompareActivationUint8Test, armnn::ActivationFunction::BoundedReLu) + 
+ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSoftReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::SoftReLu, 1u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareLeakyReLuActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::LeakyReLu, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareAbsActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Abs, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSqrtActivationWithReference, PositiveActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Sqrt, 5u) + +ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(CompareSquareActivationWithReference, ActivationFixture, + CompareActivationTest, armnn::ActivationFunction::Square, 5u) + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/backends/test/BatchNormTestImpl.hpp b/src/armnn/backends/test/BatchNormTestImpl.hpp new file mode 100644 index 0000000000..861ef6b053 --- /dev/null +++ b/src/armnn/backends/test/BatchNormTestImpl.hpp @@ -0,0 +1,112 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/ArmNN.hpp> +#include <armnn/Tensor.hpp> +#include <backends/WorkloadInfo.hpp> + +#include "test/TensorHelpers.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" + +#include "backends/test/QuantizeHelper.hpp" + + +template<typename T> +LayerTestResult<T,4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset) +{ + const unsigned int width = 2; + const unsigned int height = 3; + const unsigned int channels = 2; + const unsigned int num = 1; + + armnn::TensorInfo inputTensorInfo({num, channels, height, width}, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({num, channels, height, width}, armnn::GetDataType<T>()); + armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + tensorInfo.SetQuantizationScale(qScale); + tensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, + { + 1.f, 4.f, + 4.f, 2.f, + 1.f, 6.f, + + 1.f, 1.f, + 4.f, 1.f, + -2.f, 4.f + })); + // these values are per-channel of the input + auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2})); + auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9})); + auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2})); + auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1})); + LayerTestResult<T,4> ret(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::BatchNormalizationQueueDescriptor data; + armnn::WorkloadInfo info; + armnn::ScopedCpuTensorHandle meanTensor(tensorInfo); + armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo); + armnn::ScopedCpuTensorHandle betaTensor(tensorInfo); + armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo); + + AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]); + AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]); + AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]); + AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]); + + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Mean = &meanTensor; + data.m_Variance = &varianceTensor; + data.m_Beta = &betaTensor; + data.m_Gamma = &gammaTensor; + data.m_Parameters.m_Eps = 0.0f; + + // for each channel: + // substract mean, divide by standard deviation (with an epsilon to avoid div by 0) + // multiply by gamma and add beta + ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, + { + 1.f, 4.f, + 4.f, 2.f, + 1.f, 6.f, + + 3.f, 3.f, + 4.f, 3.f, + 2.f, 4.f + })); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +}
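// Note (editorial sketch, not part of the ArmNN sources): the expected values above follow the
// standard per-channel batch normalization formula the comment describes:
//   out = gamma * (x - mean) / sqrt(variance + eps) + beta
// A hypothetical stand-alone helper that reproduces them (m_Eps is 0 in this test):
#include <cmath>

inline float BatchNormRefSketch(float x, float mean, float variance,
                                float beta, float gamma, float eps = 0.0f)
{
    return gamma * (x - mean) / std::sqrt(variance + eps) + beta;
}

// Example: the first input value 1.f sits in channel 0 (mean 3, variance 4, beta 3, gamma 2),
// so BatchNormRefSketch(1.f, 3.f, 4.f, 3.f, 2.f) = 2 * (1 - 3) / 2 + 3 = 1.f,
// which matches the first expected output value.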
\ No newline at end of file diff --git a/src/armnn/backends/test/Conv2dTestImpl.hpp b/src/armnn/backends/test/Conv2dTestImpl.hpp new file mode 100644 index 0000000000..0c0511b234 --- /dev/null +++ b/src/armnn/backends/test/Conv2dTestImpl.hpp @@ -0,0 +1,802 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/ArmNN.hpp> +#include <armnn/Tensor.hpp> +#include <armnn/TypesUtils.hpp> +#include <backends/WorkloadInfo.hpp> + +#include "test/TensorHelpers.hpp" +#include "QuantizeHelper.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" + +// Mapping from input type to bias type for fully connected layers. +// float => float, uint8_t => int32_t +template<typename T> +struct FullyConnectedBiasTypeForInputType; + +template<> +struct FullyConnectedBiasTypeForInputType<float> +{ + using Type = float; +}; + +template<> +struct FullyConnectedBiasTypeForInputType<uint8_t> +{ + using Type = int32_t; +}; + +// Modifies a std::vector in-place using a specified bias +template<typename T, typename B> +void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset, + const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h) +{ + BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()), + "Invalid type and parameter combination."); + BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()), + "Invalid type and parameter combination."); + + // Note we need to dequantize and re-quantize the image value and the bias + for (uint32_t i = 0; i < bias.size(); ++i) + { + float dBias = SelectiveDequantize(bias[i], bScale, bOffset); + for (uint32_t y = 0; y < h; ++y) + { + for (uint32_t x = 0; x < w; ++x) + { + uint32_t offset = (i * h + y) * w + x; + BOOST_ASSERT(offset < v.size()); + T& outRef = v[offset]; + float dOutput = SelectiveDequantize(outRef, vScale, vOffset); + outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset); + } + } + } +} + + + +template<typename T, typename B> +LayerTestResult<T, 4> SimpleConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory, + const boost::multi_array<T, 4>& input, + const boost::multi_array<T, 4>& kernel, + const boost::multi_array<B, 1>& bias, + const boost::multi_array<T, 4>& outputExpected, + float qScale, + int32_t qOffset, + uint32_t padLeft = 0, + uint32_t padTop = 0, + uint32_t padRight = 0, + uint32_t padBottom = 0) +{ + unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[2]); + unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[3]); + unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[1]); + unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]); + + unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]); + unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]); + unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]); + unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]); + + unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[2]); + unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[3]); + + bool biasEnabled = bias.size() > 0; + + // This function currently assumes 1 batch of input/output (and duplicates this into 2 
batches) + BOOST_ASSERT(inputNum == 1); + BOOST_ASSERT(outputNum == 1); + + // If a bias is used, its size must equal the number of output channels + BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels); + + + // Note these tensors will use two (identical) batches + armnn::TensorInfo inputTensorInfo({2*inputNum, inputChannels, inputHeight, inputWidth}, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({2*outputNum, outputChannels, outputHeight, outputWidth}, + armnn::GetDataType<T>()); + armnn::TensorInfo kernelDesc({outputChannels, inputChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>()); + armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + kernelDesc.SetQuantizationScale(qScale); + kernelDesc.SetQuantizationOffset(qOffset); + biasDesc.SetQuantizationScale(qScale*qScale); + biasDesc.SetQuantizationOffset(0); + } + + LayerTestResult<T, 4> ret(outputTensorInfo); + + // Construct input data - Two batches of the same input image + std::vector<T> inputImage; + inputImage.assign(input.data(), input.data() + 1*inputChannels*inputHeight*inputWidth); + std::vector<T> inputData; + inputData.insert(inputData.end(), inputImage.begin(), inputImage.end()); + inputData.insert(inputData.end(), inputImage.begin(), inputImage.end()); + auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData); + + std::vector<T> outputImage; + outputImage.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth); + + // Apply bias to output image if enabled + if(biasEnabled) + { + std::vector<T> biasV; + biasV.assign(bias.data(), bias.data() + outputChannels); + ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), + biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(), + outputWidth, outputHeight); + } + + // Construct expected output data - two identical images + std::vector<T> outputData; + outputData.insert(outputData.end(), outputImage.begin(), outputImage.end()); + outputData.insert(outputData.end(), outputImage.begin(), outputImage.end()); + + ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData); + + // todo: nontrivial padding and strides + uint32_t strideX = 1; + uint32_t strideY = 1; + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::Convolution2dQueueDescriptor data; + armnn::WorkloadInfo info; + armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc); + armnn::ScopedCpuTensorHandle biasTensor(biasDesc); + + AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + + if(biasEnabled) + { + AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + } + + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + data.m_Weight = &weightsTensor; + data.m_Bias = &biasTensor; // still set this whether or not bias is enabled - can be a source of bugs + data.m_Parameters.m_StrideX = strideX; + data.m_Parameters.m_StrideY = strideY; 
+ data.m_Parameters.m_PadLeft = padLeft; + data.m_Parameters.m_PadRight = padRight; + data.m_Parameters.m_PadTop = padTop; + data.m_Parameters.m_PadBottom = padBottom; + data.m_Parameters.m_BiasEnabled = biasEnabled; + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info); + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +template<typename T, typename B> +LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset, + bool biasEnabled) +{ + unsigned int inputHeight = 3; + unsigned int inputWidth = 3; + unsigned int inputChannels = 2; + unsigned int inputNum = 1; + + unsigned int kernelHeight = 3; + unsigned int kernelWidth = 3; + unsigned int kernelChannels = inputChannels; + + unsigned int outputHeight = 1; + unsigned int outputWidth = 1; + unsigned int outputChannels = kernelChannels; + unsigned int outputNum = inputNum; + + armnn::TensorInfo inputTensorInfo({ inputNum, inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ outputNum, outputChannels, outputHeight, outputWidth }, + armnn::GetDataType<T>()); + armnn::TensorInfo kernelDesc({ 1, outputChannels, kernelHeight, kernelWidth }, armnn::GetDataType<T>()); + armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + kernelDesc.SetQuantizationScale(qScale); + kernelDesc.SetQuantizationOffset(qOffset); + biasDesc.SetQuantizationScale(qScale*qScale); + biasDesc.SetQuantizationOffset(0); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>( + QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), { + 1.f, 2.f, 1.f, + 2.f, 1.f, 2.f, + 1.f, 2.f, 1.f, + + 1.f, 2.f, 1.f, + 2.f, 1.f, 2.f, + 1.f, 2.f, 1.f, + }))); + + std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(), + {0, 2})); + auto bias = MakeTensor<B, 1>(biasDesc, biasV); + + auto kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>( + QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), { + 1.f, 0.f, 1.f, + 0.f, 0.f, 0.f, + -1.f, 0.f, -1.f, + + 1.f, 0.f, 1.f, + 0.f, 0.f, 0.f, + -1.f, 0.f, -1.f, + }))); + + // manually calculated + std::vector<T> outputImage( + QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset(), + {0.f, 0.f}) + ); + + // Optionally apply bias to output image + if(biasEnabled) + { + ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), + biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(), + outputWidth, outputHeight); + } + + LayerTestResult<T, 4> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = 
workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::DepthwiseConvolution2dQueueDescriptor data; + armnn::WorkloadInfo info; + armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc); + armnn::ScopedCpuTensorHandle biasTensor(biasDesc); + + AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + data.m_Weight = &weightsTensor; + data.m_Bias = &biasTensor; // still set this whether or not bias is enabled + data.m_Parameters.m_StrideX = 1; + data.m_Parameters.m_StrideY = 1; + data.m_Parameters.m_PadLeft = 0; + data.m_Parameters.m_PadRight = 0; + data.m_Parameters.m_PadTop = 0; + data.m_Parameters.m_PadBottom = 0; + data.m_Parameters.m_BiasEnabled = biasEnabled; + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info); + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +template<typename T, typename B> +LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset, + bool biasEnabled) +{ + unsigned int depthMultiplier = 2; + + unsigned int inputHeight = 8; + unsigned int inputWidth = 16; + unsigned int inputChannels = 2; + unsigned int inputBatchSize = 1; + + unsigned int kernelHeight = 5; + unsigned int kernelWidth = 3; + + unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2; + unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2; + unsigned int outputChannels = inputChannels * depthMultiplier; + unsigned int outputBatchSize = inputBatchSize; + + armnn::TensorInfo inputTensorInfo({inputBatchSize, inputChannels, inputHeight, inputWidth}, + armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({outputBatchSize, outputChannels, outputHeight, outputWidth}, + armnn::GetDataType<T>()); + armnn::TensorInfo kernelDesc({depthMultiplier, inputChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>()); + armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>()); + + // Set quantization parameters if the requested type is a quantized type. 
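    // Note (editorial, not part of the ArmNN sources): biasDesc below uses qScale*qScale as its
    // quantization scale. For quantized convolution the bias is conventionally quantized with
    // inputScale * weightScale and a zero offset; both input and weights use qScale in these
    // tests, so that product is qScale*qScale.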
+ if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + kernelDesc.SetQuantizationScale(qScale); + kernelDesc.SetQuantizationOffset(qOffset); + biasDesc.SetQuantizationScale(qScale*qScale); + biasDesc.SetQuantizationOffset(0); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>( + QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), { + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }))); + + std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(), + {0, 2, 1, -1})); + auto bias = MakeTensor<B, 1>(biasDesc, biasV); + + auto kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>( + QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), { + 1, 1, 1, + 1, -1, 1, + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + + 0, 0, 0, + 0, -1, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 1, 0, + 0, 0, 0, + 0, 0, 0 + }))); + + // manually calculated + std::vector<T> outputImage = std::vector<T>( + QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), { + 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, + 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, + 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, + 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, + 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, + 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, + + -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, + -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, + -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, + -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, + + 8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f + })); + + // Optionally apply bias to output image + if(biasEnabled) + { + ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), + biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(), + outputWidth, outputHeight); + } + + LayerTestResult<T, 4> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::DepthwiseConvolution2dQueueDescriptor data; + armnn::WorkloadInfo info; + armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc); + armnn::ScopedCpuTensorHandle biasTensor(biasDesc); + + AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + data.m_Weight = &weightsTensor; + data.m_Bias = &biasTensor; // still set this whether or not bias is enabled + data.m_Parameters.m_StrideX = 2; + data.m_Parameters.m_StrideY = 1; + data.m_Parameters.m_PadLeft = 0; + data.m_Parameters.m_PadRight = 0; + data.m_Parameters.m_PadTop = 1; + data.m_Parameters.m_PadBottom = 1; + data.m_Parameters.m_BiasEnabled = biasEnabled; + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info); + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + + + +template<typename T> +LayerTestResult<T,4> Convolution1dTestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset, + bool biasEnabled) +{ + using B = typename FullyConnectedBiasTypeForInputType<T>::Type; + + // until we have a specialist 1D convolution layer, we can fake one using + // 2D convolution with the final dimension set to 1. + // I don't anticipate this being particularly slow, given that convolution is implemented + // as a matrix multiplication, at which point dimension doesn't matter. + + unsigned int batchSize = 1; + unsigned int inputChannels = 2; + unsigned int outputChannels = 3; + unsigned int inputSize = 5; // the 1D size (could view as 'width' or 'height') + unsigned int kernelSize = 3; + unsigned int padSize = 2; + unsigned int stride = 1; + unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride + + armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, armnn::GetDataType<T>()); + armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, armnn::GetDataType<T>()); + armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, armnn::GetDataType<T>()); + armnn::TensorInfo biasInfo({outputChannels}, armnn::GetDataType<B>()); + + // Set quantization parameters if the requested type is a quantized type. 
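+ // (As in the depthwise test above, the bias scale below is set to input scale * kernel scale, with a zero offset.)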
+ if(armnn::IsQuantizedType<T>()) + { + inputInfo.SetQuantizationScale(qScale); + inputInfo.SetQuantizationOffset(qOffset); + outputInfo.SetQuantizationScale(qScale); + outputInfo.SetQuantizationOffset(qOffset); + kernelInfo.SetQuantizationScale(qScale); + kernelInfo.SetQuantizationOffset(qOffset); + biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale()); + biasInfo.SetQuantizationOffset(0); + } + + std::vector<T> inputData( + QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), { + 5.0f, -2.0f, 2.5f, 0.0f, 1.0f, + -3.0f, 3.2f, 5.0f, 2.0f, 3.0f, + })); + + std::vector<T> kernelData( + QuantizedVector<T>(kernelInfo.GetQuantizationScale(), kernelInfo.GetQuantizationOffset(), { + 1.0f, 0.0f, 0.0f, + 0.0f, 2.0f, -1.5f, + + 0.0f, 0.0f, 0.0f, + 0.2f, 0.2f, 0.2f, + + 0.5f, 0.0f, 0.5f, + 0.0f, -1.0f, 0.0f + })); + + std::vector<B> biasData( + QuantizedVector<B>(biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(), { + 1.0f, 0.0f, 0.0f + })); + + std::vector<T> outputData( + QuantizedVector<T>(outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), { + 4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f, + -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f, + 2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f + })); + + // Optionally apply bias to output image + if(biasEnabled) + { + ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), + biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(), + 1, outputSize); + } + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo); + + armnn::Convolution2dQueueDescriptor data; + armnn::WorkloadInfo info; + armnn::ScopedCpuTensorHandle weightsTensor(kernelInfo); + armnn::ScopedCpuTensorHandle biasTensor(biasInfo); + + AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); + AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data()); + + AddInputToWorkload(data, info, inputInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputInfo, outputHandle.get()); + + data.m_Weight = &weightsTensor; + data.m_Bias = &biasTensor; + data.m_Parameters.m_StrideX = 1; + data.m_Parameters.m_StrideY = stride; + data.m_Parameters.m_PadLeft = 0; + data.m_Parameters.m_PadRight = 0; + data.m_Parameters.m_PadTop = padSize; + data.m_Parameters.m_PadBottom = padSize; + data.m_Parameters.m_BiasEnabled = biasEnabled; + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info); + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); + + workload->Execute(); + + // output + LayerTestResult<T,4> ret(outputInfo); + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + ret.outputExpected = MakeTensor<T, 4>(outputInfo, outputData); + return ret; +} + + + +template<typename T> +LayerTestResult<T,4> CompareConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory) +{ + unsigned int inputHeight = 8; + unsigned int inputWidth = 16; + unsigned int inputChannels = 3; + unsigned int inputNum = 5; + + unsigned int kernelHeight = 3; + unsigned int kernelWidth = 3; + + 
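+ // The stride and padding chosen below give outputHeight = (8 + 2*1 - 3 + 3) / 3 = 3 and outputWidth = (16 + 2*1 - 3 + 2) / 2 = 8, + // i.e. the usual floor((in + 2*pad - kernel) / stride) + 1.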
unsigned int strideX = 2; + unsigned int strideY = 3; + unsigned int padX = 1; + unsigned int padY = 1; + + unsigned int outputNum = inputNum; + unsigned int outputChannels = 2; + unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY; + unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + armnn::TensorInfo kernelDesc; + armnn::TensorInfo biasDesc; + + unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth}; + unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth}; + unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth}; + unsigned int biasShape[] = {outputChannels}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>()); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>()); + kernelDesc = armnn::TensorInfo(4, kernelShape, armnn::GetDataType<T>()); + biasDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>()); + + LayerTestResult<T,4> ret(outputTensorInfo); + + auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908); + auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234); + auto bias = MakeRandomTensor<T, 1>(biasDesc, 1028); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::Convolution2dQueueDescriptor data; + armnn::WorkloadInfo info; + armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc); + armnn::ScopedCpuTensorHandle biasTensor(biasDesc); + + AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Weight = &weightsTensor; + data.m_Bias = &biasTensor; + data.m_Parameters.m_StrideX = strideX; + data.m_Parameters.m_StrideY = strideY; + data.m_Parameters.m_PadLeft = padX; + data.m_Parameters.m_PadRight = padX; + data.m_Parameters.m_PadTop = padY; + data.m_Parameters.m_PadBottom = padY; + data.m_Parameters.m_BiasEnabled = true; + + std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo); + + armnn::Convolution2dQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get()); + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info); + std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateConvolution2d(refData, refInfo); + + outputHandleRef->Allocate(); + inputHandleRef->Allocate(); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + + workload->Execute(); + workloadRef->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + + return ret; +} + 
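Both CompareConvolution2dTestImpl above and CompareDepthwiseConvolution2dTestImpl below fill ret.output from the backend under test and ret.outputExpected from the reference backend, and leave the actual comparison to the caller. A minimal, self-contained sketch of what such a check could look like (the OutputsMatch name and the 1e-3 tolerance are illustrative assumptions, not taken from this commit):

#include <boost/multi_array.hpp>
#include <cmath>
#include <cstddef>

// Element-wise comparison of the two boost::multi_array buffers held by a LayerTestResult;
// returns true when every element of 'actual' is within 'tolerance' of 'expected'.
template<typename T, std::size_t N>
bool OutputsMatch(const boost::multi_array<T, N>& actual,
                  const boost::multi_array<T, N>& expected,
                  float tolerance = 1e-3f)
{
    if (actual.num_elements() != expected.num_elements())
    {
        return false; // shape mismatch, nothing to compare element by element
    }
    const T* a = actual.data();
    const T* e = expected.data();
    for (std::size_t i = 0; i < actual.num_elements(); ++i)
    {
        if (std::fabs(static_cast<float>(a[i]) - static_cast<float>(e[i])) > tolerance)
        {
            return false;
        }
    }
    return true;
}

A caller could then write, for example, auto result = CompareConvolution2dTestImpl<float>(clFactory, refFactory); followed by BOOST_TEST(OutputsMatch(result.output, result.outputExpected));.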
+template<typename T> +LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory) +{ + unsigned int inputHeight = 8; + unsigned int inputWidth = 16; + unsigned int inputChannels = 3; + unsigned int inputNum = 5; + + unsigned int kernelHeight = 3; + unsigned int kernelWidth = 3; + unsigned int channelMultiplier = 1; + + unsigned int strideX = 2; + unsigned int strideY = 3; + unsigned int padX = 1; + unsigned int padY = 1; + + unsigned int outputNum = inputNum; + unsigned int outputChannels = inputChannels * channelMultiplier; + unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY; + unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + armnn::TensorInfo kernelDesc; + armnn::TensorInfo biasDesc; + + unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth }; + unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth }; + unsigned int kernelShape[] = { channelMultiplier, inputChannels, kernelHeight, kernelWidth }; + unsigned int biasShape[] = { outputChannels }; + + float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0; + float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0; + int32_t qOffset = 0; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>(), inputsQScale, qOffset); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>(), outputQScale, qOffset); + kernelDesc = armnn::TensorInfo(4, kernelShape, armnn::GetDataType<T>(), inputsQScale, qOffset); + biasDesc = armnn::TensorInfo(1, biasShape, armnn::GetBiasDataType(armnn::GetDataType<T>()), inputsQScale, qOffset); + + LayerTestResult<T, 4> ret(outputTensorInfo); + + auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908, 0.0f, 255.0f); + auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234, 0.0f, 255.0f); + auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type, 1>(biasDesc, 1028, 0.0f, 255.0f); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::DepthwiseConvolution2dQueueDescriptor data; + armnn::WorkloadInfo info; + armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc); + armnn::ScopedCpuTensorHandle biasTensor(biasDesc); + + AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); + AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Weight = &weightsTensor; + data.m_Bias = &biasTensor; + data.m_Parameters.m_StrideX = strideX; + data.m_Parameters.m_StrideY = strideY; + data.m_Parameters.m_PadLeft = padX; + data.m_Parameters.m_PadRight = padX; + data.m_Parameters.m_PadTop = padY; + data.m_Parameters.m_PadBottom = padY; + data.m_Parameters.m_BiasEnabled = true; + + std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo); + + armnn::DepthwiseConvolution2dQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadInput(refData, 
refInfo, 0, inputTensorInfo, inputHandleRef.get()); + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info); + std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateDepthwiseConvolution2d(refData, refInfo); + + outputHandleRef->Allocate(); + inputHandleRef->Allocate(); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + + workload->Execute(); + workloadRef->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + + return ret; +} diff --git a/src/armnn/backends/test/CreateWorkloadCl.cpp b/src/armnn/backends/test/CreateWorkloadCl.cpp new file mode 100644 index 0000000000..3f320d80e9 --- /dev/null +++ b/src/armnn/backends/test/CreateWorkloadCl.cpp @@ -0,0 +1,356 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "backends/ClWorkloadFactory.hpp" +#include "backends/RefWorkloadFactory.hpp" +#include "backends/MemCopyWorkload.hpp" +#include "backends/ClWorkloadUtils.hpp" +#include "backends/ClWorkloads.hpp" +#include "backends/ClTensorHandle.hpp" + +#include "test/CreateWorkloadClNeon.hpp" + +boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle* tensorHandle, + std::initializer_list<unsigned int> expectedDimensions) +{ + return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions); +} + +BOOST_AUTO_TEST_SUITE(CreateWorkloadCl) + +BOOST_AUTO_TEST_CASE(CreateActivationWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = CreateActivationWorkloadTest<ClActivationFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest) + ActivationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1})); +} + +BOOST_AUTO_TEST_CASE(CreateAdditionWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = CreateAdditionWorkloadTest<ClAdditionFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of CreateAdditionWorkloadTest) + AdditionQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto inputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3})); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3})); +} + +BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = 
CreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest) + BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 1, 1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3, 1, 1})); +} + +template <typename Convolution2dWorkloadType> +static void Convolution2dWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + auto workload = CreateConvolution2dWorkloadTest<Convolution2dWorkloadType>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest) + Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 8, 16})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 2, 10})); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat32Workload) +{ + Convolution2dWorkloadTest<ClConvolution2dFloat32Workload>(); +} + + +template <typename Convolution2dWorkloadType> +static void DirectConvolution2dWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + auto workload = CreateDirectConvolution2dWorkloadTest<Convolution2dWorkloadType>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateDirectConvolution2dWorkloadTest) + Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6})); +} + +BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat32Workload) +{ + DirectConvolution2dWorkloadTest<ClConvolution2dFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload) +{ + DirectConvolution2dWorkloadTest<ClConvolution2dUint8Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + auto workload = + CreateFullyConnectedWorkloadTest<ClFullyConnectedFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest) + FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7})); +} + +BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = + CreateMultiplicationWorkloadTest<ClMultiplicationFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we 
expect them (see definition of CreateMultiplicationWorkloadTest) + MultiplicationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto inputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3})); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3})); +} + +BOOST_AUTO_TEST_CASE(CreateNormalizationWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = CreateNormalizationWorkloadTest<ClNormalizationFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest) + NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 5, 5, 1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 5, 5, 1})); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = CreatePooling2dWorkloadTest<ClPooling2dFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of CreatePooling2dWorkloadTest) + Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 2, 5, 5})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 2, 2, 4})); +} + +template <typename ReshapeWorkloadType> +static void ClCreateReshapeWorkloadTest() +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest) + ReshapeQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4})); // Leading size 1 dimensions are collapsed by ACL. 
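+ // (The test reshapes {4, 1} into {1, 4}; ACL drops the leading 1-dimension, so the handle reports {4} here.)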
+} + +BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload) +{ + ClCreateReshapeWorkloadTest<ClReshapeFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) +{ + ClCreateReshapeWorkloadTest<ClReshapeUint8Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateSoftmaxWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = CreateSoftmaxWorkloadTest<ClSoftmaxFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of ClSoftmaxFloat32Workload) + SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1})); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1})); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = CreateSplitterWorkloadTest<ClSplitterFloat32Workload>(factory, graph); + + // check that outputs are as we expect them (see definition of CreateSplitterWorkloadTest) + SplitterQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {7})); + auto outputHandle0 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {4})); + auto outputHandle1 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {1})); + auto outputHandle2 = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2})); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterMerger) +{ + // Test that it is possible to decide which output of the splitter layer + // should be linked to which input of the merger layer. + // We test that it is possible to specify the 0th output + // of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be the 0th input + // of the merger. + + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workloads = + CreateSplitterMergerWorkloadTest<ClSplitterFloat32Workload, ClMergerFloat32Workload>(factory, graph); + + auto wlSplitter = std::move(workloads.first); + auto wlMerger = std::move(workloads.second); + + //check that the index of inputs/outputs matches what we declared on InputDescriptor construction.
+ armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]); + armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]); + armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[0]); + armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlMerger->GetData().m_Inputs[1]); + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(mIn0); + BOOST_TEST(mIn1); + + //flipped order of inputs/outputs + bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); + BOOST_TEST(validDataPointers); + + + //also make sure that the inputs are sub-tensors of one tensor and the outputs are sub-tensors of another tensor + bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent()) + && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent()); + + BOOST_TEST(validSubTensorParents); +} + +BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs) +{ + // Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer. + // We create a splitter with two outputs. Each of those outputs is then used by two different activation layers. + + Graph graph; + ClWorkloadFactory factory; + std::unique_ptr<ClSplitterFloat32Workload> wlSplitter; + std::unique_ptr<ClActivationFloat32Workload> wlActiv0_0; + std::unique_ptr<ClActivationFloat32Workload> wlActiv0_1; + std::unique_ptr<ClActivationFloat32Workload> wlActiv1_0; + std::unique_ptr<ClActivationFloat32Workload> wlActiv1_1; + + CreateSplitterMultipleInputsOneOutputWorkloadTest<ClSplitterFloat32Workload, + ClActivationFloat32Workload>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1); + + //check that the index of inputs/outputs matches what we declared on InputDescriptor construction.
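+ // (A null result from any of these dynamic_casts would mean the handle is not of the expected sub-tensor type, which the BOOST_TEST checks on the raw pointers below would catch.)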
+ armnn::ClSubTensorHandle* sOut0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[0]); + armnn::ClSubTensorHandle* sOut1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlSplitter->GetData().m_Outputs[1]); + armnn::ClSubTensorHandle* activ0_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]); + armnn::ClSubTensorHandle* activ0_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]); + armnn::ClSubTensorHandle* activ1_0Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]); + armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]); + + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(activ0_0Im); + BOOST_TEST(activ0_1Im); + BOOST_TEST(activ1_0Im); + BOOST_TEST(activ1_1Im); + + bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && + (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); + + BOOST_TEST(validDataPointers); +} + +BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl) +{ + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + CreateMemCopyWorkloads<CopyFromCpuToClWorkload,CopyFromClToCpuWorkload,IClTensorHandle>(factory); +} + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationWorkload) +{ + Graph graph; + ClWorkloadFactory factory; + factory.LoadOpenClRuntime(); + + auto workload = CreateL2NormalizationWorkloadTest<ClL2NormalizationFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest) + L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]); + + BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 5, 20, 50, 67 })); + BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 5, 20, 50, 67 })); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/backends/test/CreateWorkloadNeon.cpp b/src/armnn/backends/test/CreateWorkloadNeon.cpp new file mode 100644 index 0000000000..807937ba2b --- /dev/null +++ b/src/armnn/backends/test/CreateWorkloadNeon.cpp @@ -0,0 +1,302 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "backends/NeonWorkloadFactory.hpp" +#include "backends/NeonWorkloadUtils.hpp" +#include "backends/NeonWorkloads.hpp" +#include "backends/MemCopyWorkload.hpp" +#include "backends/NeonTensorHandle.hpp" + +#include "test/CreateWorkloadClNeon.hpp" + +BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon) + +namespace +{ + +bool TestNeonTensorHandleInfo(armnn::INeonTensorHandle* handle, const armnn::TensorInfo& expectedInfo) +{ + using namespace armnn::armcomputetensorutils; + + const arm_compute::ITensorInfo* handleInfo = handle->GetTensor().info(); + const arm_compute::TensorInfo expectedAclInfo = BuildArmComputeTensorInfo(expectedInfo); + + if (handleInfo->data_type() != expectedAclInfo.data_type()) + { + return false; + } + + if (handleInfo->num_dimensions() != expectedAclInfo.num_dimensions()) + { + return false; + } + + if (handleInfo->quantization_info() != expectedAclInfo.quantization_info()) + { + return false; + } + + for (std::size_t d = 0; d < expectedAclInfo.num_dimensions(); ++d) + { + if (handleInfo->dimension(d) != expectedAclInfo.dimension(d)) + { + return false; + } + } + + return true; +} + +} // namespace + +BOOST_AUTO_TEST_CASE(CreateActivationWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateActivationWorkloadTest<NeonActivationFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest) + ActivationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreateAdditionWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateAdditionWorkloadTest<NeonAdditionFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of CreateAdditionWorkloadTest) + AdditionQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto inputHandle2 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateBatchNormalizationWorkloadTest<NeonBatchNormalizationFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest) + BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({2, 3, 1, 1}, DataType::Float32))); + 
BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3, 1, 1}, DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateConvolution2dWorkloadTest<NeonConvolution2dFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest) + Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({2, 3, 8, 16}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 2, 2, 10}, DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateFullyConnectedWorkloadTest<NeonFullyConnectedFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest) + FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateMultiplicationWorkloadTest<NeonMultiplicationFloat32Workload>(factory, graph); + + // check that inputs/outputs are as we expect them (see definition of CreateMultiplicationWorkloadTest) + MultiplicationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto inputHandle2 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreateNormalizationWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateNormalizationWorkloadTest<NeonNormalizationFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest) + NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 5, 5, 1}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 5, 5, 1}, DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = 
CreatePooling2dWorkloadTest<NeonPooling2dFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest) + Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 2, 5, 5}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 2, 2, 4}, DataType::Float32))); +} + +template <typename ReshapeWorkloadType> +static void NeonCreateReshapeWorkloadTest(DataType dataType) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest) + ReshapeQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, dataType))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, dataType))); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload) +{ + NeonCreateReshapeWorkloadTest<NeonReshapeFloat32Workload>(DataType::Float32); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) +{ + NeonCreateReshapeWorkloadTest<NeonReshapeUint8Workload>(DataType::QuantisedAsymm8); +} + +BOOST_AUTO_TEST_CASE(CreateSoftmaxWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateSoftmaxWorkloadTest<NeonSoftmaxFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest) + SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType::Float32))); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterWorkload) +{ + Graph graph; + NeonWorkloadFactory factory; + auto workload = CreateSplitterWorkloadTest<NeonSplitterFloat32Workload>(factory, graph); + + // check that outputs are as we expect them (see definition of CreateSplitterWorkloadTest) + SplitterQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 7}, DataType::Float32))); + auto outputHandle0 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 4}, DataType::Float32))); + auto outputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[1]); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({1, 1}, DataType::Float32))); + auto outputHandle2 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[2]); + BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({1, 2}, 
DataType::Float32))); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterMerger) +{ + // Test that it is possible to decide which output of the splitter layer + // should be linked to which input of the merger layer. + // We test that it is possible to specify the 0th output + // of the splitter to be the 1st input to the merger, and the 1st output of the splitter to be the 0th input + // of the merger. + + Graph graph; + NeonWorkloadFactory factory; + + auto workloads = + CreateSplitterMergerWorkloadTest<NeonSplitterFloat32Workload, NeonMergerFloat32Workload>(factory, graph); + + auto wlSplitter = std::move(workloads.first); + auto wlMerger = std::move(workloads.second); + + //check that the index of inputs/outputs matches what we declared on InputDescriptor construction. + armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]); + armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]); + armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[0]); + armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlMerger->GetData().m_Inputs[1]); + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(mIn0); + BOOST_TEST(mIn1); + + bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); + + BOOST_TEST(validDataPointers); +} + +BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs) +{ + // Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer. + // We create a splitter with two outputs. Each of those outputs is then used by two different activation layers. + + Graph graph; + NeonWorkloadFactory factory; + std::unique_ptr<NeonSplitterFloat32Workload> wlSplitter; + std::unique_ptr<NeonActivationFloat32Workload> wlActiv0_0; + std::unique_ptr<NeonActivationFloat32Workload> wlActiv0_1; + std::unique_ptr<NeonActivationFloat32Workload> wlActiv1_0; + std::unique_ptr<NeonActivationFloat32Workload> wlActiv1_1; + + CreateSplitterMultipleInputsOneOutputWorkloadTest<NeonSplitterFloat32Workload, + NeonActivationFloat32Workload>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1); + + armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]); + armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]); + armnn::INeonTensorHandle* activ0_0Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]); + armnn::INeonTensorHandle* activ0_1Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]); + armnn::INeonTensorHandle* activ1_0Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]); + armnn::INeonTensorHandle* activ1_1Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]); + + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(activ0_0Im); + BOOST_TEST(activ0_1Im); + BOOST_TEST(activ1_0Im); + BOOST_TEST(activ1_1Im); + + bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && + (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); + + BOOST_TEST(validDataPointers); +} + +BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsNeon) +{ + NeonWorkloadFactory factory; + CreateMemCopyWorkloads<CopyFromCpuToNeonWorkload,CopyFromNeonToCpuWorkload,INeonTensorHandle>(factory); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/backends/test/CreateWorkloadRef.cpp 
b/src/armnn/backends/test/CreateWorkloadRef.cpp new file mode 100644 index 0000000000..e0eacebe1a --- /dev/null +++ b/src/armnn/backends/test/CreateWorkloadRef.cpp @@ -0,0 +1,414 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "backends/RefWorkloadFactory.hpp" +#include "backends/RefWorkloads.hpp" +#include "backends/CpuTensorHandle.hpp" + +#include "test/CreateWorkload.hpp" + +namespace +{ + +template<typename Workload> +void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo) +{ + auto queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo)); + BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); +} + +template <typename Workload> +void CheckInputsOutput(std::unique_ptr<Workload> workload, + const TensorInfo& inputInfo0, + const TensorInfo& inputInfo1, + const TensorInfo& outputInfo) +{ + auto queueDescriptor = workload->GetData(); + auto inputHandle0 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]); + auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]); + auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0)); + BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1)); + BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo)); +} +} + +BOOST_AUTO_TEST_SUITE(CreateWorkloadRef) + +template <typename ActivationWorkloadType> +static void RefCreateActivationWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateActivationWorkloadTest<ActivationWorkloadType>(factory, graph); + + // check that outputs are as we expect them (see definition of CreateActivationWorkloadTest) + CheckInputOutput(std::move(workload), + TensorInfo({ 1, 1 }, ActivationWorkloadType::ms_DataType), + TensorInfo({ 1, 1 }, ActivationWorkloadType::ms_DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload) +{ + RefCreateActivationWorkloadTest<RefActivationFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload) +{ + RefCreateActivationWorkloadTest<RefActivationUint8Workload>(); +} + +template <typename AdditionWorkloadType> +static void RefCreateAdditionWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateAdditionWorkloadTest<AdditionWorkloadType>(factory, graph); + + // check that outputs are as we expect them (see definition of CreateAdditionWorkloadTest) + CheckInputsOutput(std::move(workload), + TensorInfo({ 2, 3 }, AdditionWorkloadType::ms_DataType), + TensorInfo({ 2, 3 }, AdditionWorkloadType::ms_DataType), + TensorInfo({ 2, 3 }, AdditionWorkloadType::ms_DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload) +{ + RefCreateAdditionWorkloadTest<RefAdditionFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload) +{ + RefCreateAdditionWorkloadTest<RefAdditionUint8Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload>(factory, graph); + + // check that 
outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest) + CheckInputOutput( + std::move(workload), TensorInfo({2, 3, 1, 1}, DataType::Float32), TensorInfo({2, 3, 1, 1}, DataType::Float32)); +} + +BOOST_AUTO_TEST_CASE(CreateConvolution2dWorkload) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest) + CheckInputOutput(std::move(workload), + TensorInfo({2, 3, 8, 16}, DataType::Float32), + TensorInfo({2, 2, 2, 10}, DataType::Float32)); +} + +BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolution2dWorkload) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = + CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest) + CheckInputOutput(std::move(workload), + TensorInfo({2, 3, 8, 16}, DataType::Float32), + TensorInfo({2, 9, 2, 10}, DataType::Float32)); +} + +template <typename FullyConnectedWorkloadType> +static void RefCreateFullyConnectedWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest) + float inputsQScale = FullyConnectedWorkloadType::ms_DataType == DataType::QuantisedAsymm8 ? 1.0f : 0.0; + float outputQScale = FullyConnectedWorkloadType::ms_DataType == DataType::QuantisedAsymm8 ? 2.0f : 0.0; + CheckInputOutput(std::move(workload), + TensorInfo({ 3, 1, 4, 5 }, FullyConnectedWorkloadType::ms_DataType, inputsQScale), + TensorInfo({ 3, 7 }, FullyConnectedWorkloadType::ms_DataType, outputQScale)); +} + +BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload) +{ + RefCreateFullyConnectedWorkloadTest<RefFullyConnectedFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload) +{ + RefCreateFullyConnectedWorkloadTest<RefFullyConnectedUint8Workload>(); +} + +template <typename MultiplicationWorkloadType> +static void RefCreateMultiplicationWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateMultiplicationWorkloadTest<MultiplicationWorkloadType>(factory, graph); + + // check that outputs are as we expect them (see definition of CreateMultiplicationWorkloadTest) + CheckInputsOutput(std::move(workload), + TensorInfo({ 2, 3 }, MultiplicationWorkloadType::ms_DataType), + TensorInfo({ 2, 3 }, MultiplicationWorkloadType::ms_DataType), + TensorInfo({ 2, 3 }, MultiplicationWorkloadType::ms_DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload) +{ + RefCreateMultiplicationWorkloadTest<RefMultiplicationFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload) +{ + RefCreateMultiplicationWorkloadTest<RefMultiplicationUint8Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateNormalizationWorkload) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateNormalizationWorkloadTest<RefNormalizationFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest) + CheckInputOutput(std::move(workload), + TensorInfo({3, 5, 5, 1}, DataType::Float32), + TensorInfo({3, 5, 5, 1}, DataType::Float32)); +} + +template 
<typename Pooling2dWorkloadType> +static void RefCreatePooling2dWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest) + CheckInputOutput( + std::move(workload), + TensorInfo({3, 2, 5, 5}, Pooling2dWorkloadType::ms_DataType), + TensorInfo({3, 2, 2, 4}, Pooling2dWorkloadType::ms_DataType)); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload) +{ + RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload) +{ + RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload>(); +} + +template <typename SoftmaxWorkloadType> +static void RefCreateSoftmaxWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest) + CheckInputOutput( + std::move(workload), + TensorInfo({4, 1}, SoftmaxWorkloadType::ms_DataType), + TensorInfo({4, 1}, SoftmaxWorkloadType::ms_DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload) +{ + RefCreateSoftmaxWorkloadTest<RefSoftmaxFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload) +{ + RefCreateSoftmaxWorkloadTest<RefSoftmaxUint8Workload>(); +} + +template <typename SplitterWorkloadType> +static void RefCreateSplitterWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType>(factory, graph); + + // check that outputs are as we expect them (see definition of CreateSplitterWorkloadTest) + SplitterQueueDescriptor queueDescriptor = workload->GetData(); + auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]); + BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 1, 7 }, SplitterWorkloadType::ms_DataType))); + auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]); + BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 4 }, SplitterWorkloadType::ms_DataType))); + auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]); + BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 1, 1 }, SplitterWorkloadType::ms_DataType))); + auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]); + BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 1, 2 }, SplitterWorkloadType::ms_DataType))); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload) +{ + RefCreateSplitterWorkloadTest<RefSplitterFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload) +{ + RefCreateSplitterWorkloadTest<RefSplitterUint8Workload>(); +} + +template <typename SplitterWorkloadType, typename MergerWorkloadType> +static void RefCreateSplitterMergerWorkloadTest() +{ + // Test that it is possible to decide which output of the splitter layer + // should be lined to which input of the merger layer + // We test that is is possible to specify 0th output + // of the splitter to be the 1st input to the merger and the 1st output of the splitter to be 0th input + // of the merger. 
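+ // (The checks below rely on pointer equality: the splitter's output handles and the merger's input handles are expected to be the very same CpuTensorHandle objects, just wired in the flipped order.)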
+ + Graph graph; + RefWorkloadFactory factory; + auto workloads = CreateSplitterMergerWorkloadTest<SplitterWorkloadType, MergerWorkloadType>(factory, graph); + + auto wlSplitter = std::move(workloads.first); + auto wlMerger = std::move(workloads.second); + + //check that the index of inputs/outputs matches what we declared on InputDescriptor construction. + armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]); + armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]); + armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[0]); + armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[1]); + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(mIn0); + BOOST_TEST(mIn1); + + bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0); + + BOOST_TEST(validDataPointers); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32) +{ + RefCreateSplitterMergerWorkloadTest<RefSplitterFloat32Workload, RefMergerFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8) +{ + RefCreateSplitterMergerWorkloadTest<RefSplitterUint8Workload, RefMergerUint8Workload>(); +} + +template <typename SplitterWorkloadType, typename ActivationWorkloadType> +static void RefCreateSingleOutputMultipleInputsTest() +{ + // Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer. + // We create a splitter with two outputs. That each of those outputs is used by two different activation layers + + Graph graph; + RefWorkloadFactory factory; + std::unique_ptr<SplitterWorkloadType> wlSplitter; + std::unique_ptr<ActivationWorkloadType> wlActiv0_0; + std::unique_ptr<ActivationWorkloadType> wlActiv0_1; + std::unique_ptr<ActivationWorkloadType> wlActiv1_0; + std::unique_ptr<ActivationWorkloadType> wlActiv1_1; + + CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType, + ActivationWorkloadType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1); + + armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]); + armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]); + armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]); + armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]); + armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]); + armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]); + + + BOOST_TEST(sOut0); + BOOST_TEST(sOut1); + BOOST_TEST(activ0_0Im); + BOOST_TEST(activ0_1Im); + BOOST_TEST(activ1_0Im); + BOOST_TEST(activ1_1Im); + + bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) && + (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im); + + BOOST_TEST(validDataPointers); +} + +BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32) +{ + RefCreateSingleOutputMultipleInputsTest<RefSplitterFloat32Workload, RefActivationFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8) +{ + RefCreateSingleOutputMultipleInputsTest<RefSplitterUint8Workload, RefActivationUint8Workload>(); +} + +template <typename ResizeBilinearWorkloadType> +static void 
RefCreateResizeBilinearTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest) + CheckInputOutput( + std::move(workload), + TensorInfo({ 2, 3, 4, 4 }, ResizeBilinearWorkloadType::ms_DataType), + TensorInfo({ 2, 3, 2, 2 }, ResizeBilinearWorkloadType::ms_DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32) +{ + RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8) +{ + RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32) +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateL2NormalizationWorkloadTest<RefL2NormalizationFloat32Workload>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest) + CheckInputOutput( + std::move(workload), + TensorInfo({ 5, 20, 50, 67 }, RefL2NormalizationFloat32Workload::ms_DataType), + TensorInfo({ 5, 20, 50, 67 }, RefL2NormalizationFloat32Workload::ms_DataType)); +} + +template <typename ReshapeWorkloadType> +static void RefCreateReshapeWorkloadTest() +{ + Graph graph; + RefWorkloadFactory factory; + auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType>(factory, graph); + + // check that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest) + CheckInputOutput( + std::move(workload), + TensorInfo({ 4, 1 }, ReshapeWorkloadType::ms_DataType), + TensorInfo({ 1, 4 }, ReshapeWorkloadType::ms_DataType)); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload) +{ + RefCreateReshapeWorkloadTest<RefReshapeFloat32Workload>(); +} + +BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload) +{ + RefCreateReshapeWorkloadTest<RefReshapeUint8Workload>(); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/backends/test/FullyConnectedTestImpl.hpp b/src/armnn/backends/test/FullyConnectedTestImpl.hpp new file mode 100644 index 0000000000..479da3fabc --- /dev/null +++ b/src/armnn/backends/test/FullyConnectedTestImpl.hpp @@ -0,0 +1,286 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +template<typename T, typename B> +LayerTestResult<T, 2> SimpleFullyConnectedTestImpl( + armnn::IWorkloadFactory& workloadFactory, + armnn::TensorInfo inputTensorInfo, + armnn::TensorInfo outputTensorInfo, + armnn::TensorInfo weightsDesc, + armnn::TensorInfo biasesDesc, + boost::multi_array<T, 2> weights, + boost::multi_array<B, 1> bias, + boost::multi_array<T, 4> input, + bool biasEnabled, + bool transposeWeights) +{ + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::FullyConnectedQueueDescriptor data; + armnn::WorkloadInfo info; + armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc); + armnn::ScopedCpuTensorHandle biasTensor(biasesDesc); + + AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]); + AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Weight = &weightsTensor; + data.m_Bias = &biasTensor; + data.m_Parameters.m_BiasEnabled = biasEnabled; + data.m_Parameters.m_TransposeWeightMatrix = transposeWeights; + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info); + LayerTestResult<T, 2> result(outputTensorInfo); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get()); + + return result; +} + +LayerTestResult<float, 2> FullyConnectedFloat32Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled, + bool transposeWeights) +{ + unsigned int inputWidth = 1; + unsigned int inputHeight = 1; + unsigned int inputChannels = 5; + unsigned int inputNum = 2; + + unsigned int outputChannels = 3; + unsigned int outputNum = 2; + + // Define the tensor descriptors + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + armnn::TensorInfo weightsDesc; + armnn::TensorInfo biasesDesc; + + unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth }; + unsigned int outputShape[] = { outputNum, outputChannels }; + unsigned int weightsShape[] = { inputChannels, outputChannels }; + if (transposeWeights) + { + std::swap(weightsShape[0], weightsShape[1]); + } + unsigned int biasShape[] = { outputChannels }; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32); + weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32); + biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32); + + LayerTestResult<float, 2> result(outputTensorInfo); + + boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>( + { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + + 5.0f, 4.0f, 3.0f, 2.0f, 1.0f + }) + ); + + boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>( + { + .5f, 2.f, .5f, + .5f, 2.f, 1.f, + .5f, 2.f, 2.f, + .5f, 2.f, 3.f, + .5f, 2.f, 4.f + })); + + if (transposeWeights) + { + weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>( + { + .5f, .5f, .5f, .5f, .5f, + 2.f, 2.f, 2.f, 2.f, 2.f, + .5f, 1.f, 2.f, 3.f, 4.f + })); + } + + + std::vector<float> biasValues({0.f, 0.f, 0.f}); + if (biasEnabled) + { + 
biasValues = std::vector<float>({10.f, 20.f, 30.f});
+    }
+    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);
+
+    result = SimpleFullyConnectedTestImpl<float>(
+        workloadFactory,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        biasEnabled, transposeWeights
+    );
+
+    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
+        {
+            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
+            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
+            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
+
+            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
+            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
+            2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
+        })
+    );
+
+    return result;
+}
+
+LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
+{
+    constexpr static unsigned int inputWidth = 3u;
+    constexpr static unsigned int inputHeight = 2u;
+    constexpr static unsigned int inputChannels = 1u;
+
+    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
+
+    constexpr static unsigned int outputChannels = 2u;
+
+    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationScale(0.1f);
+    inputTensorInfo.SetQuantizationOffset(63);
+
+    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(5.f);
+    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
+
+    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, armnn::DataType::QuantisedAsymm8);
+    weightsDesc.SetQuantizationScale(0.2f);
+    weightsDesc.SetQuantizationOffset(93);
+
+    armnn::TensorInfo biasesDesc({ outputChannels }, armnn::DataType::Signed32);
+    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
+    biasesDesc.SetQuantizationOffset(0);
+
+    LayerTestResult<uint8_t, 2> result(outputTensorInfo);
+
+    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>{51, 124, 28,
+                                                                              251, 8, 92});
+
+    auto weights = MakeTensor<uint8_t, 2>(weightsDesc, std::vector<uint8_t>{51, 193, 42, 53, 175, 34,
+                                                                            210, 145, 23, 74, 34, 150});
+
+    // scale = 0.02
+    // offset = 0
+    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
+
+    result = SimpleFullyConnectedTestImpl<uint8_t>(
+        workloadFactory,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        biasEnabled, true
+    );
+
+    // manually calculated
+    // note one of these values has been clamped to 0
+    if (biasEnabled)
+    {
+        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 242});
+    }
+    else
+    {
+        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 32});
+    }
+
+    return result;
+}
+
+
+
+//
+// ArmNN variant of the AndroidNN fully_connected_float_large test.
+//
+// Tests the fully connected layer with large values, optionally transposing weights.
+// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
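+//
+// For reference, with the input of {1, 10, 100, 1000, 10000}, weights of {2, 3, 4, 5, 6} and a bias of 900000 used
+// below, the expected output works out as 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.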
+// +template<typename T> +LayerTestResult<T, 2> FullyConnectedLargeTestCommon(armnn::IWorkloadFactory& workloadFactory, + bool transposeWeights, + float qScale = 0.0f, + int32_t qOffset = 0) +{ + unsigned int inputWidth = 1; + unsigned int inputHeight = 1; + unsigned int inputChannels = 5; + unsigned int inputNum = 1; + + unsigned int outputChannels = 1; + unsigned int outputNum = 1; + + // Define the tensor descriptors + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + armnn::TensorInfo weightsDesc; + armnn::TensorInfo biasesDesc; + + unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth }; + unsigned int outputShape[] = { outputNum, outputChannels }; + unsigned int weightsShape[] = { inputChannels, outputChannels }; + if (transposeWeights) + { + std::swap(weightsShape[0], weightsShape[1]); + } + + unsigned int biasShape[] = { outputChannels }; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>()); + outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>()); + weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::GetDataType<T>()); + biasesDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + LayerTestResult<T, 2> result(outputTensorInfo); + + boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f, + }) + ); + + boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc, + QuantizedVector<T>(qScale, qOffset, { + 2.0f, 3.0f, 4.0f, 5.0f, 6.0f + }) + ); + + std::vector<T> biasValues({900000.f}); + boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues); + + result = SimpleFullyConnectedTestImpl<T>( + workloadFactory, + inputTensorInfo, outputTensorInfo, + weightsDesc, biasesDesc, + weights, bias, input, + true, transposeWeights + ); + + result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 965432.0f, + }) + ); + + return result; +} diff --git a/src/armnn/backends/test/IsLayerSupportedTest.cpp b/src/armnn/backends/test/IsLayerSupportedTest.cpp new file mode 100644 index 0000000000..4b4c9f6099 --- /dev/null +++ b/src/armnn/backends/test/IsLayerSupportedTest.cpp @@ -0,0 +1,70 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include <boost/test/unit_test.hpp> + +#include "test/TensorHelpers.hpp" +#include "LayerTests.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/RefWorkloadFactory.hpp" +#include <Layers.hpp> + +#include <string> +#include <iostream> +#include <backends/ClWorkloadFactory.hpp> +#include <backends/NeonWorkloadFactory.hpp> + +#include "IsLayerSupportedTestImpl.hpp" + + +BOOST_AUTO_TEST_SUITE(IsLayerSupported) + +BOOST_AUTO_TEST_CASE(IsLayerSupportedLayerTypeMatches) +{ + LayerTypeMatchesTest(); +} + +BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference) +{ + armnn::RefWorkloadFactory factory; + IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory); +} + +BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference) +{ + armnn::RefWorkloadFactory factory; + IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory); +} + +#ifdef ARMCOMPUTENEON_ENABLED +BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Neon) +{ + armnn::NeonWorkloadFactory factory; + IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::Float32>(&factory); +} + +BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Neon) +{ + armnn::NeonWorkloadFactory factory; + IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory); +} +#endif //#ifdef ARMCOMPUTENEON_ENABLED + + +#ifdef ARMCOMPUTECL_ENABLED +BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Cl) +{ + armnn::ClWorkloadFactory factory; + IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::Float32>(&factory); +} + +BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Cl) +{ + armnn::ClWorkloadFactory factory; + IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory); +} +#endif //#ifdef ARMCOMPUTECL_ENABLED + +BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file diff --git a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp new file mode 100644 index 0000000000..abc9806737 --- /dev/null +++ b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp @@ -0,0 +1,440 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Graph.hpp" + +#include <boost/core/ignore_unused.hpp> + +namespace +{ +armnn::Graph dummyGraph; + +// Make a dummy TensorInfo object +template<armnn::DataType DataType> +armnn::TensorInfo MakeDummyTensorInfo() +{ + return armnn::TensorInfo({2,2,2,2}, DataType); +} + + +// Make a dummy WorkloadInfo using a dummy TensorInfo. +template<armnn::DataType DataType> +armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs) +{ + armnn::WorkloadInfo info; + for (unsigned int i=0; i < numInputs; i++) + { + info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>()); + } + for (unsigned int o=0; o < numOutputs; o++) + { + info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>()); + } + return info; +} + +// template class to create a dummy layer (2 parameters) +template<typename LayerType, typename DescType = typename LayerType::DescriptorType> +struct DummyLayer +{ + DummyLayer() + { + m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), ""); + } + ~DummyLayer() + { + dummyGraph.EraseLayer(m_Layer); + } + LayerType* m_Layer; +}; + +// template class to create a dummy layer (1 parameter) +template<typename LayerType> +struct DummyLayer<LayerType, void> +{ + DummyLayer() + { + m_Layer = dummyGraph.AddLayer<LayerType>(""); + } + ~DummyLayer() + { + dummyGraph.EraseLayer(m_Layer); + } + LayerType* m_Layer; +}; + +template<> +struct DummyLayer<armnn::ConstantLayer, void> +{ + DummyLayer() + { + m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>(std::shared_ptr<armnn::ScopedCpuTensorHandle>(), ""); + } + ~DummyLayer() + { + dummyGraph.EraseLayer(m_Layer); + } + armnn::ConstantLayer* m_Layer; +}; + +template<> +struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId> +{ + DummyLayer() + { + m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), ""); + + } + ~DummyLayer() + { + dummyGraph.EraseLayer(m_Layer); + } + armnn::InputLayer* m_Layer; +}; + +template<> +struct DummyLayer<armnn::MergerLayer> +{ + DummyLayer() + { + armnn::OriginsDescriptor desc(2); + m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, ""); + + } + ~DummyLayer() + { + dummyGraph.EraseLayer(m_Layer); + } + armnn::MergerLayer* m_Layer; +}; + +template<> +struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId> +{ + DummyLayer() + { + m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), ""); + + } + ~DummyLayer() + { + dummyGraph.EraseLayer(m_Layer); + } + armnn::OutputLayer* m_Layer; +}; + +template<> +struct DummyLayer<armnn::SplitterLayer> +{ + DummyLayer() + { + armnn::ViewsDescriptor desc(1); + m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, ""); + + } + ~DummyLayer() + { + dummyGraph.EraseLayer(m_Layer); + } + armnn::SplitterLayer* m_Layer; +}; + +template <typename ConvolutionLayerType> +struct DummyConvolutionLayer +{ + DummyConvolutionLayer() + { + typename ConvolutionLayerType::DescriptorType desc; + m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, ""); + m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>( + 
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); + m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>( + armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32)); + } + ~DummyConvolutionLayer() + { + dummyGraph.EraseLayer(m_Layer); + } + ConvolutionLayerType* m_Layer; +}; + +template<> +struct DummyLayer<armnn::Convolution2dLayer> + : public DummyConvolutionLayer<armnn::Convolution2dLayer> +{ +}; + +template<> +struct DummyLayer<armnn::DepthwiseConvolution2dLayer> + : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer> +{ +}; + +// Tag for giving LayerType entries a unique strong type each. +template<armnn::LayerType> +struct Tag{}; + +#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \ +template<armnn::DataType DataType> \ +struct LayerTypePolicy<armnn::LayerType::name, DataType> \ +{ \ + using Type = armnn::name##Layer; \ + using Desc = descType; \ + using QueueDesc = armnn::name##QueueDescriptor; \ + constexpr static const char* NameStr = #name; \ + \ + static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \ + unsigned int nIn, unsigned int nOut) \ + { \ + QueueDesc desc; \ + armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \ + return factory->Create##name(desc, info); \ + } \ +}; + +// define a layer policy specialization for use with the IsLayerSupported tests. +// Use this version for layers whose constructor takes 1 parameter(name). +#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void) + +// define a layer policy specialization for use with the IsLayerSupported tests. +// Use this version for layers whose constructor takes 2 parameters(descriptor and name). +#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor) + +// Layer policy template +template<armnn::LayerType Type, armnn::DataType DataType> +struct LayerTypePolicy; + +// Every entry in the armnn::LayerType enum must be accounted for below. 
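+// As a rough, hand-written illustration (the real code is generated by the macro above), the line
+// DECLARE_LAYER_POLICY_2_PARAM(Activation) below expands to approximately:
+//
+//     template<armnn::DataType DataType>
+//     struct LayerTypePolicy<armnn::LayerType::Activation, DataType>
+//     {
+//         using Type = armnn::ActivationLayer;
+//         using Desc = armnn::ActivationDescriptor;
+//         using QueueDesc = armnn::ActivationQueueDescriptor;
+//         constexpr static const char* NameStr = "Activation";
+//
+//         static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory,
+//                                                                    unsigned int nIn, unsigned int nOut)
+//         {
+//             QueueDesc desc;
+//             armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut);
+//             return factory->CreateActivation(desc, info);
+//         }
+//     };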
+DECLARE_LAYER_POLICY_2_PARAM(Activation) + +DECLARE_LAYER_POLICY_1_PARAM(Addition) + +DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization) + +DECLARE_LAYER_POLICY_1_PARAM(Constant) + +DECLARE_LAYER_POLICY_2_PARAM(Convolution2d) + +DECLARE_LAYER_POLICY_1_PARAM(MemCopy) + +DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d) + +DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization) + +DECLARE_LAYER_POLICY_1_PARAM(Floor) + +DECLARE_LAYER_POLICY_2_PARAM(FullyConnected) + +DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId) + +DECLARE_LAYER_POLICY_1_PARAM(L2Normalization) + +DECLARE_LAYER_POLICY_2_PARAM(Merger) + +DECLARE_LAYER_POLICY_1_PARAM(Multiplication) + +DECLARE_LAYER_POLICY_2_PARAM(Normalization) + +DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId) + +DECLARE_LAYER_POLICY_2_PARAM(Permute) + +DECLARE_LAYER_POLICY_2_PARAM(Pooling2d) + +DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear) + +DECLARE_LAYER_POLICY_2_PARAM(Softmax) + +DECLARE_LAYER_POLICY_2_PARAM(Splitter) + +DECLARE_LAYER_POLICY_2_PARAM(Reshape) + + +// Generic implementation to get the number of input slots for a given layer type; +template<armnn::LayerType Type> +unsigned int GetNumInputs(const armnn::Layer& layer) +{ + return layer.GetNumInputSlots(); +} + +// Generic implementation to get the number of output slots for a given layer type; +template<armnn::LayerType Type> +unsigned int GetNumOutputs(const armnn::Layer& layer) +{ + return layer.GetNumOutputSlots(); +} + +template<> +unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer) +{ + boost::ignore_unused(layer); + return 2; +} + +// Test that the IsLayerSupported() function returns the correct value. +// We determine the correct value by *trying* to create the relevant workload and seeing if it matches what we expect. +// Returns true if expectations are met, otherwise returns false. 
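+// As an illustrative, hand-written example (such calls are in practice generated by the recursion further down),
+// a single instantiation for the Softmax entry on the reference backend would look roughly like:
+//
+//     armnn::RefWorkloadFactory factory;
+//     IsLayerSupportedTest<armnn::RefWorkloadFactory, armnn::DataType::Float32, armnn::LayerType::Softmax>(
+//         &factory, Tag<armnn::LayerType::Softmax>());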
+template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type> +bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>) +{ + using LayerPolicy = LayerTypePolicy<Type, DataType>; + using LayerType = typename LayerPolicy::Type; + using LayerDesc = typename LayerPolicy::Desc; + DummyLayer<LayerType, LayerDesc> layer; + + unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer); + unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer); + + // Make another dummy layer just to make IsLayerSupported have valid inputs + DummyLayer<armnn::ConstantLayer, void> previousLayer; + // Set output of previous layer to a dummy tensor + armnn::TensorInfo output = MakeDummyTensorInfo<DataType>(); + previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output); + // Connect all outputs of previous layer to inputs of tested layer + for (unsigned int i = 0; i < numIn; i++) + { + armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0); + armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i); + previousLayerOutputSlot.Connect(layerInputSlot); + } + // Set outputs of tested layer to a dummy tensor + for (unsigned int i = 0; i < numOut; i++) + { + layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output); + } + + std::string layerName = LayerPolicy::NameStr; + std::string reasonIfUnsupported; + if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported)) + { + std::string errorMsg = " layer expected support but found none."; + try + { + bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr; + BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg); + return retVal; + } + catch (const armnn::InvalidArgumentException& e) + { + boost::ignore_unused(e); + // This is ok since we throw InvalidArgumentException when creating the dummy workload. + return true; + } + catch(const std::exception& e) + { + errorMsg = e.what(); + BOOST_TEST_ERROR(layerName << ": " << errorMsg); + return false; + } + catch (...) + { + errorMsg = "Unexpected error while testing support for "; + BOOST_TEST_ERROR(errorMsg << layerName); + return false; + } + } + else + { + std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some."; + try + { + bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr; + BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg); + return retVal; + } + // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them + // using parameters that make IsLayerSupported() return false should throw an + // InvalidArgumentException or UnimplementedException + catch(const armnn::InvalidArgumentException& e) + { + boost::ignore_unused(e); + return true; + } + catch (const armnn::UnimplementedException& e) + { + boost::ignore_unused(e); + return true; + } + catch(const std::exception& e) + { + errorMsg = e.what(); + BOOST_TEST_ERROR(layerName << ": " << errorMsg); + return false; + } + catch (...) 
+        {
+            errorMsg = "Unexpected error while testing support for ";
+            BOOST_TEST_ERROR(errorMsg << layerName);
+            return false;
+        }
+    }
+}
+
+// Helper function to compute the next type in the LayerType enum
+constexpr armnn::LayerType NextType(armnn::LayerType type)
+{
+    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
+}
+
+// Termination function for determining the end of the LayerType enumeration
+template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
+bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
+{
+    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
+};
+
+// Recursive function to test an entry in the LayerType enum and then iterate on the next entry.
+template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
+bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
+{
+    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
+
+    return v &&
+           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
+               (factory, Tag<NextType(Type)>());
+};
+
+// Helper function to pass through to the test framework.
+template<typename FactoryType, armnn::DataType DataType>
+bool IsLayerSupportedTests(FactoryType *factory)
+{
+    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
+};
+
+template<armnn::LayerType Type>
+bool TestLayerTypeMatches()
+{
+    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
+    using LayerType = typename LayerPolicy::Type;
+    using LayerDesc = typename LayerPolicy::Desc;
+    DummyLayer<LayerType, LayerDesc> layer;
+
+    std::stringstream ss;
+    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
+    bool v = Type == layer.m_Layer->GetType();
+    BOOST_CHECK_MESSAGE(v, ss.str());
+    return v;
+};
+
+template<armnn::LayerType Type>
+bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
+{
+    return TestLayerTypeMatches<Type>();
+};
+
+template<armnn::LayerType Type>
+bool LayerTypeMatchesTestImpl(Tag<Type>)
+{
+    return TestLayerTypeMatches<Type>() &&
+           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
+};
+
+bool LayerTypeMatchesTest()
+{
+    return LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(Tag<armnn::LayerType::FirstLayer>());
+};
+
+} //namespace
diff --git a/src/armnn/backends/test/LayerTests.cpp b/src/armnn/backends/test/LayerTests.cpp
new file mode 100644
index 0000000000..76681f9a93
--- /dev/null
+++ b/src/armnn/backends/test/LayerTests.cpp
@@ -0,0 +1,3884 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+// +#include "LayerTests.hpp" + +#include "test/TensorHelpers.hpp" +#include "TensorCopyUtils.hpp" + +#include <boost/test/unit_test.hpp> + +#include "armnn/LayerSupport.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" + +#ifdef ARMCOMPUTECL_ENABLED +#include "backends/ClTensorHandle.hpp" +#include "backends/ArmComputeTensorUtils.hpp" +#endif + +#include <algorithm> +#include <boost/cast.hpp> + +#include "WorkloadTestUtils.hpp" +#include "Conv2dTestImpl.hpp" +#include "BatchNormTestImpl.hpp" +#include "ActivationTestImpl.hpp" +#include "Pooling2dTestImpl.hpp" +#include "ReshapeTestImpl.hpp" +#include "FullyConnectedTestImpl.hpp" +#include "SplitterTestImpl.hpp" +#include "SoftmaxTestImpl.hpp" +#include "NormTestImpl.hpp" +#include "PermuteTestImpl.hpp" + +// 3-channel 16x8 image used as common input data for a number of Conv2d tests +static std::vector<float> ConvInput3x8x16({ + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 +}); + +// 2-channel bias used by a number of Conv2d tests +static std::vector<float> Bias2({0, 2}); + +// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled +template<typename T> +boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset) +{ + if(biasEnabled) + { + armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>()); + boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2)); + return bias; + } + else + { + return boost::multi_array<T, 1>(); + } +} + +template<typename T> +LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset, + bool biasEnabled) +{ + // Use common single-batch 3-channel 16x8 image + armnn::TensorInfo inputDesc({1, 3, 8, 16}, 
armnn::GetDataType<T>()); + boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16)); + + // Use a 2-element batch with 3-channel 3x5 kernels + armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>()); + boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 1, 1, 1, + 1, -1, 1, + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 + }))); + + // Expected output is 2 batch elements of a 1-channel 14x4 image + armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>()); + boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, + -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, + -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, + -23.5f, -23.5f, -23.5f, + -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, + -23.5f, -23.5f, -23.5f, + + 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }))); + + return SimpleConvolution2dTestImpl<T>(workloadFactory, + input, + kernel, + GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset), + expectedOutput, + qScale, + qOffset); +} + +template<typename T> +LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset, + bool biasEnabled) +{ + // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path + + // Use common single-batch 3-channel 16x8 image + armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>()); + boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16)); + + // Use a 2-element batch of 3-channel 3x3 kernels + armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>()); + boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 1, 1, 1, + 1, -1, 1, + 1, 1, 1, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 + }))); + + // Expected output is 1 batch of a 2-channel 14x6 image + armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>()); + boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, + -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, + -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f, + -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f, + -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f, + 
-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f, + + 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }))); + + return SimpleConvolution2dTestImpl<T>(workloadFactory, + input, + kernel, + GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset), + expectedOutput, + qScale, + qOffset); +} + +LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled) +{ + return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled); +} + +LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled) +{ + return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled); +} + +LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled) +{ + return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled); +} + +LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled) +{ + return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled); +} + +template<typename T> +LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon( + armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset) +{ + // Use a single-batch 1-channel 3x3 image as input + armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>()); + boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 11,21,31, + 12,22,32, + 13,23,33 + }))); + + // Use 1 batch of a 1-channel 2x2 kernel + armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>()); + boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + -11,-21, + -12,-22, + }))); + +// Expected output is 1 batch of a 1-channel 6x8 image +// Manually calculated like this: +//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..] +//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..] +//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..] +//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..] +//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..] +//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..] +//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..] 
+ armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>()); + boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 0, 0, 0, 0, 0, 0, + -242, -594, -934, -372, 0, 0, + -495, -1190, -1850, -725, 0, 0, + -538, -1256, -1916, -748, 0, 0, + -273, -626, -946, -363, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }))); + + return SimpleConvolution2dTestImpl<T>(workloadFactory, + input, + kernel, + GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset), + expectedOutput, + qScale, + qOffset, + 1, // padding left + 2, // padding top + 3, // padding right + 4); // padding bottom +} + +template<typename T> +LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset) +{ + // Use a single-batch 1-channel 5x5 image as input + armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>()); + boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 11,21,31,41,51, + 12,22,32,42,52, + 13,23,33,43,53, + 14,24,34,44,54, + 15,25,35,45,55, + }))); + + // Use 1 batch of a 1-channel 4x4 kernel + armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + -11,-21,-31,-41, + -12,-22,-32,-42, + -13,-23,-33,-43, + -14,-24,-34,-44, + }))); + + // Expected output is 1 batch of a 1-channel 5x5 image + armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>()); + std::vector<T> myVec(outputDesc.GetNumElements(), 0); + boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + -4723, -7044, -9324, -6253, -3542, + -7140, -10580, -13940, -9300, -5230, + -9590, -14120, -18520, -12290, -6860, + -9980, -14560, -18960, -12560, -7000, + -7518, -10904, -14144, -9318, -5152, + }))); + + return SimpleConvolution2dTestImpl<T>(workloadFactory, + input, + kernel, + GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset), + expectedOutput, + qScale, + qOffset, + 1, // padding left + 2, // padding top + 2, // padding right + 1); // padding bottom +} + +LayerTestResult<float, 4> +Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory) +{ + return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, 0.0f, 0); +} + +LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory) +{ + return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, 0.0f, 0); +} + +LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled) +{ + return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled); +} + +LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled) +{ + return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled); +} + +LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled) +{ + return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled); +} + +LayerTestResult<uint8_t, 4> 
DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled) +{ + return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled); +} + +LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled) +{ + return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled); +} + +LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled) +{ + return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled); +} + +LayerTestResult<float,4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory) +{ + return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory); +} + +template<typename T> +LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory) +{ + return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory); +} + +template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>( + armnn::IWorkloadFactory&, armnn::IWorkloadFactory&); +template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>( + armnn::IWorkloadFactory&, armnn::IWorkloadFactory&); + +LayerTestResult<float,4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory) +{ + auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness; + auto normChannel = armnn::NormalizationAlgorithmChannel::Across; + return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod); +} + +LayerTestResult<float,4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory) +{ + auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness; + auto normChannel = armnn::NormalizationAlgorithmChannel::Within; + return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod); +} + +LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta) +{ + return SimpleSoftmaxTestImpl<float>(workloadFactory, beta); +} + +LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta) +{ + return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta); +} + +LayerTestResult<float,4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::NormalizationAlgorithmChannel normChannel, + armnn::NormalizationAlgorithmMethod normMethod) +{ + return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod); +} + +LayerTestResult<float,2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + float beta) +{ + return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta); +} + +LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + float beta) +{ + return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta); +} + +std::vector<LayerTestResult<float,3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory) +{ + return SplitterTestCommon<float>(workloadFactory); +} + +std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0); +} + 
+LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory) +{ + return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0); +} + +LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0); +} + +LayerTestResult<float,3> MergerTest(armnn::IWorkloadFactory& workloadFactory) +{ + unsigned int outputWidth = 5; + unsigned int outputHeight = 6; + unsigned int outputChannels = 3; + + unsigned int inputWidth1 = 2; + unsigned int inputHeight1 = 2; + unsigned int inputChannels1 = 3; + + unsigned int inputWidth2 = 2; + unsigned int inputHeight2 = 4; + unsigned int inputChannels2 = 3; + + unsigned int inputWidth3 = 3; + unsigned int inputHeight3 = 6; + unsigned int inputChannels3 = 2; + + unsigned int inputWidth4 = 3; + unsigned int inputHeight4 = 6; + unsigned int inputChannels4 = 1; + + // Define the tensor descriptors + armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32); + armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32); + armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32); + armnn::TensorInfo inputTensorInfo3({ inputChannels3, inputHeight3, inputWidth3 }, armnn::DataType::Float32); + armnn::TensorInfo inputTensorInfo4({ inputChannels4, inputHeight4, inputWidth4 }, armnn::DataType::Float32); + + LayerTestResult<float,3> ret(outputTensorInfo); + + + ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>( + { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, + 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, + 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, + 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, + 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, + + 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, + 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, + 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, + 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, + 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, + 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, + + 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, + 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, + 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, + 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, + 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, + 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, + + }) + ); + + + auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>( + { + 1.0f, 2.0f, + 6.0f, 7.0f, + + 31.0f, 32.0f, + 36.0f, 37.0f, + + 61.0f, 62.0f, + 66.0f, 67.0f, + }) + ); + + auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>( + { + 11.0f, 12.0f, + 16.0f, 17.0f, + 21.0f, 22.0f, + 26.0f, 27.0f, + + 41.0f, 42.0f, + 46.0f, 47.0f, + 51.0f, 52.0f, + 56.0f, 57.0f, + + 71.0f, 72.0f, + 76.0f, 77.0f, + 81.0f, 82.0f, + 86.0f, 87.0f, + }) + ); + + auto input3 = MakeTensor<float, 3>(inputTensorInfo3, std::vector<float>( + { + 3.0f, 4.0f, 5.0f, + 8.0f, 9.0f, 10.0f, + 13.0f, 14.0f, 15.0f, + 18.0f, 19.0f, 20.0f, + 23.0f, 24.0f, 25.0f, + 28.0f, 29.0f, 30.0f, + + 33.0f, 34.0f, 35.0f, + 38.0f, 39.0f, 40.0f, + 43.0f, 44.0f, 45.0f, + 48.0f, 49.0f, 50.0f, + 53.0f, 54.0f, 55.0f, + 58.0f, 59.0f, 60.0f, + }) + ); + + + auto input4 = MakeTensor<float, 3>(inputTensorInfo4, std::vector<float>( + { + 63.0f, 64.0f, 65.0f, + 68.0f, 69.0f, 70.0f, + 73.0f, 74.0f, 75.0f, + 78.0f, 79.0f, 80.0f, + 83.0f, 84.0f, 85.0f, + 88.0f, 89.0f, 90.0f, + }) + ); + + std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //extent of the window is defined by size of input[0] + armnn::MergerQueueDescriptor::ViewOrigin 
window1(wOrigin1); + + std::vector<unsigned int> wOrigin2 = {0, 2, 0}; //extent of the window is defined by size of input[1] + armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2); + + std::vector<unsigned int> wOrigin3 = {0, 0, 2}; //extent of the window is defined by size of input[2] + armnn::MergerQueueDescriptor::ViewOrigin window3(wOrigin3); + + std::vector<unsigned int> wOrigin4 = {2, 0, 2}; //extent of the window is defined by size of input[3] + armnn::MergerQueueDescriptor::ViewOrigin window4(wOrigin4); + + + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + bool subTensorsSupported = workloadFactory.SupportsSubTensors(); + + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) : + workloadFactory.CreateTensorHandle(inputTensorInfo1); + + std::unique_ptr<armnn::ITensorHandle> inputHandle2 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) : + workloadFactory.CreateTensorHandle(inputTensorInfo2); + + std::unique_ptr<armnn::ITensorHandle> inputHandle3 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo3.GetShape(), wOrigin3.data()) : + workloadFactory.CreateTensorHandle(inputTensorInfo3); + + std::unique_ptr<armnn::ITensorHandle> inputHandle4 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo4.GetShape(), wOrigin4.data()) : + workloadFactory.CreateTensorHandle(inputTensorInfo4); + + + armnn::MergerQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); + AddInputToWorkload(data, info, inputTensorInfo3, inputHandle3.get()); + AddInputToWorkload(data, info, inputTensorInfo4, inputHandle4.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + data.m_ViewOrigins.push_back(window1); + data.m_ViewOrigins.push_back(window2); + data.m_ViewOrigins.push_back(window3); + data.m_ViewOrigins.push_back(window4); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info); + + inputHandle1->Allocate(); + inputHandle2->Allocate(); + inputHandle3->Allocate(); + inputHandle4->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]); + CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]); + CopyDataToITensorHandle(inputHandle3.get(), &input3[0][0][0]); + CopyDataToITensorHandle(inputHandle4.get(), &input4[0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); + + return ret; +} + +LayerTestResult<float,4> AdditionTest(armnn::IWorkloadFactory& workloadFactory) +{ + unsigned int batchSize = 2; + unsigned int channels = 2; + unsigned int height = 2; + unsigned int width = 3; + + armnn::TensorInfo inputTensorInfo1, inputTensorInfo2; + armnn::TensorInfo outputTensorInfo; + + unsigned int shape[] = {batchSize, channels, height, width}; + + inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + + + auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>( + { + 0.0f, 
2.0f, 1.0f, + 0.2f, 1.0f, 2.0f, + + 1.0f, 2.0f, 1.0f, + 0.2f, 1.0f, 2.0f, + + 0.0f, 2.0f, 1.0f, + 4.2f, 1.0f, 2.0f, + + 0.0f, 0.0f, 1.0f, + 0.2f, 1.0f, 2.0f, + })); + + auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>( + { + 1.0f, 2.0f, 1.0f, + 0.0f, 1.0f, 2.0f, + + 1.0f, 2.0f, -2.0f, + 0.2f, 1.0f, 2.0f, + + 0.0f, 2.0f, 1.0f, + 4.2f, 0.0f, -3.0f, + + 0.0f, 0.0f, 1.0f, + 0.7f, 1.0f, 5.0f, + })); + + LayerTestResult<float,4> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>( + { + 1.0f, 4.0f, 2.0f, + 0.2f, 2.0f, 4.0f, + + 2.0f, 4.0f, -1.0f, + 0.4f, 2.0f, 4.0f, + + 0.0f, 4.0f, 2.0f, + 8.4f, 1.0f, -1.0f, + + 0.0f, 0.0f, 2.0f, + 0.9f, 2.0f, 7.0f, + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::AdditionQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info); + + inputHandle1->Allocate(); + inputHandle2->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); + CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +template <typename T> +LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset) +{ + armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>()); + armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>()); + + if (armnn::IsQuantizedType<T>()) + { + inputTensorInfo1.SetQuantizationScale(qScale); + inputTensorInfo1.SetQuantizationOffset(qOffset); + inputTensorInfo2.SetQuantizationScale(qScale); + inputTensorInfo2.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, + { + 0.0f, + 1.0f, + + 2.0f, + 3.0f, + + 4.0f, + 5.0f, + })); + + auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset, + { + 0.5f, 1.5f, 2.5f, + 3.5f, 4.5f, 5.5f, + })); + + LayerTestResult<T,4> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, + { + 0.5f, 1.5f, 2.5f, + 4.5f, 5.5f, 6.5f, + + 2.5f, 3.5f, 4.5f, + 6.5f, 7.5f, 8.5f, + + 4.5f, 5.5f, 6.5f, + 8.5f, 9.5f, 10.5f, + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::AdditionQueueDescriptor data; + armnn::WorkloadInfo info; + 
AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info); + + inputHandle1->Allocate(); + inputHandle2->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); + CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +template <typename T> +LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset) +{ + armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>()); + armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>()); + + if (armnn::IsQuantizedType<T>()) + { + inputTensorInfo1.SetQuantizationScale(qScale); + inputTensorInfo1.SetQuantizationOffset(qOffset); + inputTensorInfo2.SetQuantizationScale(qScale); + inputTensorInfo2.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, + { + 0.0f, 1.0f, 2.0f, + 3.0f, 4.0f, 5.0f, + 6.0f, 7.0f, 8.0f, + 9.0f, 10.0f, 11.0f, + 12.0f, 13.0f, 14.0f, + 15.0f, 16.0f, 17.0f, + })); + + auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset, + { + 0.5f, + })); + + LayerTestResult<T,4> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, + { + 0.5f, 1.5f, 2.5f, + 3.5f, 4.5f, 5.5f, + 6.5f, 7.5f, 8.5f, + 9.5f, 10.5f, 11.5f, + 12.5f, 13.5f, 14.5f, + 15.5f, 16.5f, 17.5f, + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::AdditionQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info); + + inputHandle1->Allocate(); + inputHandle2->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); + CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory) +{ + return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0); +} + +LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0); +} + +LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& 
workloadFactory) +{ + return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0); +} + +LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128); +} + +LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory) +{ + unsigned int batchSize = 4; + unsigned int channels = 1; + unsigned int height = 2; + unsigned int width = 3; + + armnn::TensorInfo inputTensorInfo1, inputTensorInfo2; + armnn::TensorInfo outputTensorInfo; + + unsigned int shape[] = {batchSize, channels, height, width}; + + inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + + auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232); + auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456); + + LayerTestResult<float,4> ret(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2); + std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::AdditionQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + armnn::AdditionQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get()); + SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get()); + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info); + std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo); + + inputHandle1->Allocate(); + inputHandle2->Allocate(); + outputHandle->Allocate(); + inputHandle1Ref->Allocate(); + inputHandle2Ref->Allocate(); + outputHandleRef->Allocate(); + + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); + CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]); + CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]); + CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]); + + workload->Execute(); + workloadRef->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + + return ret; +} + +LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory) +{ + const unsigned int width = 2; + const unsigned int height = 2; + const unsigned int channelCount = 2; + const unsigned int batchSize = 2; + + 
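// The expected result defined further down is simply the element-wise product of the two inputs below + // (for example the trailing block of fours multiplied by the block of fives gives the trailing 20s). +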
armnn::TensorInfo inputTensorInfo0; + armnn::TensorInfo inputTensorInfo1; + armnn::TensorInfo outputTensorInfo; + + constexpr unsigned int shape[] = { batchSize, channelCount, height, width }; + constexpr std::size_t dimensionCount = std::extent<decltype(shape)>::value; + + inputTensorInfo0 = armnn::TensorInfo(dimensionCount, shape, armnn::DataType::Float32); + inputTensorInfo1 = armnn::TensorInfo(dimensionCount, shape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(dimensionCount, shape, armnn::DataType::Float32); + + auto input0 = MakeTensor<float, 4>(inputTensorInfo0, std::vector<float>({ + 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4 })); + + auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>({ + 2, 2, 2, 2, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5 })); + + LayerTestResult<float,4> ret(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0); + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::MultiplicationQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get()); + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info); + + inputHandle0->Allocate(); + inputHandle1->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({ + 2, 2, 2, 2, 6, 6, 6, 6, + 12, 12, 12, 12, 20, 20, 20, 20 })); + + return ret; +} + +LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory) +{ + const unsigned int width = 16; + const unsigned int height = 32; + const unsigned int channelCount = 2; + const unsigned int batchSize = 5; + + armnn::TensorInfo inputTensorInfo0; + armnn::TensorInfo inputTensorInfo1; + armnn::TensorInfo outputTensorInfo; + + constexpr unsigned int shape[] = { batchSize, channelCount, height, width }; + + inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + + LayerTestResult<float,4> comparisonResult(outputTensorInfo); + + auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992); + auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257); + + std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0); + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0); + std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = 
refWorkloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::MultiplicationQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get()); + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + armnn::MultiplicationQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get()); + SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get()); + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info); + std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo); + + inputHandle0->Allocate(); + inputHandle1->Allocate(); + outputHandle->Allocate(); + inputHandle0Ref->Allocate(); + inputHandle1Ref->Allocate(); + outputHandleRef->Allocate(); + + CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]); + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); + CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]); + CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]); + + workload->Execute(); + workloadRef->Execute(); + + CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get()); + + return comparisonResult; +} + +LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory) +{ + const unsigned int width = 2; + const unsigned int height = 3; + const unsigned int channels = 5; + const unsigned int batchSize = 3; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + armnn::TensorInfo tensorInfo; + + constexpr unsigned int shape[] = {batchSize, channels, height, width}; + constexpr unsigned int tensorShape[] = {channels}; + + inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32); + + auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312); + + auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123); + auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f); + auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123); + auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345); + + LayerTestResult<float,4> ret(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::BatchNormalizationQueueDescriptor data; + armnn::WorkloadInfo info; + armnn::ScopedCpuTensorHandle meanTensor(tensorInfo); + armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo); + armnn::ScopedCpuTensorHandle betaTensor(tensorInfo); + 
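// For reference: batch normalization is expected to compute gamma * (x - mean) / sqrt(variance + eps) + beta + // per channel (eps is set to 0.01f via m_Eps below); the factory under test and the reference factory are + // then run on the same random input so their outputs can be compared element-wise. +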
armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo); + + AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]); + AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]); + AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]); + AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]); + + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Mean = &meanTensor; + data.m_Variance = &varianceTensor; + data.m_Beta = &betaTensor; + data.m_Gamma = &gammaTensor; + data.m_Parameters.m_Eps = 0.01f; + + armnn::BatchNormalizationQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get()); + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info); + std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo); + + inputHandle->Allocate(); + outputHandle->Allocate(); + inputHandleRef->Allocate(); + outputHandleRef->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + + workload->Execute(); + workloadRef->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + + return ret; +} + +void Concatenate(armnn::IWorkloadFactory& workloadFactory, + std::initializer_list<const armnn::TensorInfo> inputTensorInfos, + std::initializer_list<void*> inputs, + const armnn::TensorInfo& outputTensorInfo, + void* output, + unsigned int concatDim) +{ + armnn::MergerQueueDescriptor queueDescriptor; + + std::vector<armnn::TensorShape> shapes; + shapes.reserve(inputTensorInfos.size()); + for (const armnn::TensorInfo& it: inputTensorInfos) + { + shapes.push_back(it.GetShape()); + } + armnn::OriginsDescriptor viewsDescriptor = armnn::CreateMergerDescriptorForConcatenation(shapes.begin(), + shapes.end(), concatDim); + + queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews()); + for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i) + { + queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i), + viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions())); + } + + const size_t inputCount = inputTensorInfos.size(); + + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles; + inputHandles.reserve(inputCount); + + const bool subTensorsSupported = workloadFactory.SupportsSubTensors(); + for (unsigned int i = 0; i < inputCount; ++i) + { + const armnn::TensorInfo& inputTensorInfo = inputTensorInfos.begin()[i]; + + std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ? 
+ workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(), + queueDescriptor.m_ViewOrigins[i].m_Origin.data()) + : workloadFactory.CreateTensorHandle(inputTensorInfo); + + inputHandles.emplace_back(std::move(inputHandle)); + } + + armnn::WorkloadInfo workloadInfo; + + for (unsigned int i = 0; i < inputCount; ++i) + { + AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos.begin()[i], inputHandles[i].get()); + } + + AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo); + + for (auto& inputHandle : inputHandles) + { + inputHandle->Allocate(); + } + + outputHandle->Allocate(); + + unsigned int nextInputId = 0; + for (auto& inputHandle : inputHandles) + { + CopyDataToITensorHandle(inputHandle.get(), *(inputs.begin() + nextInputId++)); + } + + workload->Execute(); + + CopyDataFromITensorHandle(output, outputHandle.get()); +} + +template <typename T> +LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset) +{ + armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>()); + + auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f })); + auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f })); + auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f })); + + armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>()); + + LayerTestResult<T, 1> result(outputTensorInfo); + + std::vector<T> output; + output.resize(outputTensorInfo.GetNumElements()); + Concatenate(workloadFactory, + { inputTensorInfo, inputTensorInfo, inputTensorInfo }, + { input0.data(), input1.data(), input2.data() }, + outputTensorInfo, + output.data(), + 0); + + result.output = MakeTensor<T, 1>(outputTensorInfo, output); + result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f + })); + + return result; +} + +LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory, + const armnn::TensorInfo& outputTensorInfo, + unsigned int dimension, + const float qScale, + const int32_t qOffset) +{ + armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>()); + + auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 1.0f, 2.0f, 3.0f, + + // Batch 1 + 10.0f, 11.0f, 12.0f, + })); + + auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 4.0f, 5.0f, 6.0f, + + // Batch 1 + 13.0f, 14.0f, 15.0f, + })); + + auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 7.0f, 8.0f, 9.0f, + + // Batch 1 + 16.0f, 17.0f, 18.0f, + })); + + LayerTestResult<T, 2> result(outputTensorInfo); + + std::vector<T> output; + output.resize(outputTensorInfo.GetNumElements()); + Concatenate(workloadFactory, + { inputTensorInfo, inputTensorInfo, inputTensorInfo }, + { input0.data(), input1.data(), input2.data() }, + outputTensorInfo, + output.data(), + dimension); + + result.output = MakeTensor<T, 2>(outputTensorInfo, 
output); + return result; +} + +template <typename T> +LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, int32_t qOffset) +{ + armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>()); + + LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset); + result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 1.0f, 2.0f, 3.0f, + + // Batch 1 + 10.0f, 11.0f, 12.0f, + + // Batch 2 + 4.0f, 5.0f, 6.0f, + + // Batch 3 + 13.0f, 14.0f, 15.0f, + + // Batch 4 + 7.0f, 8.0f, 9.0f, + + // Batch 5 + 16.0f, 17.0f, 18.0f, + })); + + return result; +} + +LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, int32_t qOffset) +{ + armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>()); + + LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset); + result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, + + // Batch 1 + 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f + })); + + return result; +} + +LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, + int32_t qOffset) +{ + armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>()); + auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 1.0f, 2.0f, 3.0f, + + // Batch 1 + 10.0f, 11.0f, 12.0f, + })); + + armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>()); + auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 4.0f, 5.0f, 6.0f, + + // Batch 1 + 13.0f, 14.0f, 15.0f, + + // Batch 0 + 7.0f, 8.0f, 9.0f, + })); + + armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>()); + auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 1 + 16.0f, 17.0f, 18.0f, + })); + + armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>()); + LayerTestResult<T, 2> result(outputTensorInfo); + + std::vector<T> output; + output.resize(outputTensorInfo.GetNumElements()); + Concatenate(workloadFactory, + { input0TensorInfo, input1TensorInfo, input2TensorInfo }, + { input0.data(), input1.data(), input2.data() }, + outputTensorInfo, + output.data(), + 0); + + result.output = MakeTensor<T, 2>(outputTensorInfo, output); + result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 1.0f, 2.0f, 3.0f, + + // Batch 1 + 10.0f, 11.0f, 12.0f, + + // Batch 2 + 4.0f, 5.0f, 6.0f, + + // Batch 3 + 13.0f, 14.0f, 15.0f, + + // Batch 4 + 7.0f, 8.0f, 9.0f, + + // Batch 5 + 16.0f, 17.0f, 18.0f, + })); + + return result; +} + +LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory) +{ + return 
Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, + int32_t qOffset) +{ + armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>()); + auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 1.0f, 2.0f, 3.0f, + + // Batch 1 + 10.0f, 11.0f, 12.0f, + })); + + armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>()); + auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, + + // Batch 1 + 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, + })); + + armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>()); + auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 9.0f, + + // Batch 1 + 18.0f + })); + + armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>()); + LayerTestResult<T, 2> result(outputTensorInfo); + + std::vector<T> output; + output.resize(outputTensorInfo.GetNumElements()); + Concatenate(workloadFactory, + { input0TensorInfo, input1TensorInfo, input2TensorInfo }, + { input0.data(), input1.data(), input2.data() }, + outputTensorInfo, + output.data(), + 1); + + result.output = MakeTensor<T, 2>(outputTensorInfo, output); + result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0 + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, + + // Batch 1 + 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, + })); + + return result; +} + +LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory, + const armnn::TensorInfo& outputTensorInfo, + unsigned int dimension, + float qScale, + int32_t qOffset) +{ + armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>()); + + auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f + })); + + auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 7.0f, 8.0f, + + // Batch 0, Channel 1 + 9.0f, 10.0f, + + // Batch 0, Channel 2 + 11.0f, 12.0f, + + // Batch 1, Channel 0 + 25.0f, 26.0f, + + // Batch 1, Channel 1 + 27.0f, 28.0f, + + // Batch 1, Channel 2 + 29.0f, 30.0f + })); + + auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 13.0f, 14.0f, + + // Batch 0, Channel 1 + 15.0f, 16.0f, + + // Batch 0, Channel 2 + 17.0f, 18.0f, + + // Batch 1, Channel 0 + 31.0f, 32.0f, + + // Batch 1, Channel 1 + 33.0f, 34.0f, + + // Batch 1, Channel 2 + 35.0f, 36.0f + })); + + LayerTestResult<T, 3> result(outputTensorInfo); + + std::vector<T> output; + output.resize(outputTensorInfo.GetNumElements()); + Concatenate(workloadFactory, + { inputTensorInfo, inputTensorInfo, inputTensorInfo }, + { input0.data(), input1.data(), input2.data() }, + outputTensorInfo, + output.data(), + dimension); + + 
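// The Concatenate helper above derives its view origins from CreateMergerDescriptorForConcatenation, so + // the inputs are laid out back to back along the requested dimension; e.g. for dimension 0 the three + // inputs occupy output batches 0-1, 2-3 and 4-5, while for dimension 2 each output row is the three + // corresponding input rows joined along the width axis (see the expected data below). +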
result.output = MakeTensor<T, 3>(outputTensorInfo, output); + return result; +} + +template <typename T> +LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, + int32_t qOffset) +{ + armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>()); + + LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0, + qScale, qOffset); + result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f, + + // Batch 2, Channel 0 + 7.0f, 8.0f, + + // Batch 2, Channel 1 + 9.0f, 10.0f, + + // Batch 2, Channel 2 + 11.0f, 12.0f, + + // Batch 3, Channel 0 + 25.0f, 26.0f, + + // Batch 3, Channel 1 + 27.0f, 28.0f, + + // Batch 3, Channel 2 + 29.0f, 30.0f, + + // Batch 4, Channel 0 + 13.0f, 14.0f, + + // Batch 4, Channel 1 + 15.0f, 16.0f, + + // Batch 4, Channel 2 + 17.0f, 18.0f, + + // Batch 5, Channel 0 + 31.0f, 32.0f, + + // Batch 5, Channel 1 + 33.0f, 34.0f, + + // Batch 5, Channel 2 + 35.0f, 36.0f + })); + return result; +} + +LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, int32_t qOffset) +{ + armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>()); + + LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset); + result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, + + // Batch 0, Channel 3 + 7.0f, 8.0f, + + // Batch 0, Channel 4 + 9.0f, 10.0f, + + // Batch 0, Channel 5 + 11.0f, 12.0f, + + // Batch 0, Channel 6 + 13.0f, 14.0f, + + // Batch 0, Channel 7 + 15.0f, 16.0f, + + // Batch 0, Channel 8 + 17.0f, 18.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f, + + // Batch 1, Channel 3 + 25.0f, 26.0f, + + // Batch 1, Channel 4 + 27.0f, 28.0f, + + // Batch 1, Channel 5 + 29.0f, 30.0f, + + // Batch 1, Channel 6 + 31.0f, 32.0f, + + // Batch 1, Channel 7 + 33.0f, 34.0f, + + // Batch 1, Channel 8 + 35.0f, 36.0f + })); + + return result; +} + +LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, int32_t qOffset) +{ + armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>()); + + LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset); + result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, 
25.0f, 26.0f, 31.0f, 32.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f, + })); + + return result; +} + +LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, + int32_t qOffset) +{ + armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>()); + auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f + })); + + armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>()); + auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 7.0f, 8.0f, + + // Batch 0, Channel 1 + 9.0f, 10.0f, + + // Batch 0, Channel 2 + 11.0f, 12.0f, + })); + + armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>()); + auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 25.0f, 26.0f, + + // Batch 0, Channel 1 + 27.0f, 28.0f, + + // Batch 0, Channel 2 + 29.0f, 30.0f, + + // Batch 1, Channel 0 + 13.0f, 14.0f, + + // Batch 1, Channel 1 + 15.0f, 16.0f, + + // Batch 1, Channel 2 + 17.0f, 18.0f, + + // Batch 2, Channel 0 + 31.0f, 32.0f, + + // Batch 2, Channel 1 + 33.0f, 34.0f, + + // Batch 2, Channel 2 + 35.0f, 36.0f + })); + + armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>()); + LayerTestResult<T, 3> result(outputTensorInfo); + + std::vector<T> output; + output.resize(outputTensorInfo.GetNumElements()); + Concatenate(workloadFactory, + { input0TensorInfo, input1TensorInfo, input2TensorInfo }, + { input0.data(), input1.data(), input2.data() }, + outputTensorInfo, + output.data(), + 0); + + result.output = MakeTensor<T, 3>(outputTensorInfo, output); + result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f, + + // Batch 2, Channel 0 + 7.0f, 8.0f, + + // Batch 2, Channel 1 + 9.0f, 10.0f, + + // Batch 2, Channel 2 + 11.0f, 12.0f, + + // Batch 3, Channel 0 + 25.0f, 26.0f, + + // Batch 3, Channel 1 + 27.0f, 28.0f, + + // Batch 3, Channel 2 + 29.0f, 30.0f, + + // Batch 4, Channel 0 + 13.0f, 14.0f, + + // Batch 4, Channel 1 + 15.0f, 16.0f, + + // Batch 4, Channel 2 + 17.0f, 18.0f, + + // Batch 5, Channel 0 + 31.0f, 32.0f, + + // Batch 5, Channel 1 + 33.0f, 34.0f, + + // Batch 5, Channel 2 + 35.0f, 36.0f + })); + + return result; +} + +LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, + int32_t qOffset) +{ + armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, 
armnn::GetDataType<T>()); + auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f + })); + + armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>()); + auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 7.0f, 8.0f, + + // Batch 0, Channel 1 + 9.0f, 10.0f, + + // Batch 0, Channel 2 + 11.0f, 12.0f, + + // Batch 0, Channel 3 + 25.0f, 26.0f, + + // Batch 1, Channel 0 + 27.0f, 28.0f, + + // Batch 1, Channel 1 + 29.0f, 30.0f, + + // Batch 1, Channel 2 + 13.0f, 14.0f, + + // Batch 1, Channel 3 + 15.0f, 16.0f, + })); + + armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>()); + auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 17.0f, 18.0f, + + // Batch 1, Channel 0 + 31.0f, 32.0f, + })); + + armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>()); + LayerTestResult<T, 3> result(outputTensorInfo); + + std::vector<T> output; + output.resize(outputTensorInfo.GetNumElements()); + Concatenate(workloadFactory, + { input0TensorInfo, input1TensorInfo, input2TensorInfo }, + { input0.data(), input1.data(), input2.data() }, + outputTensorInfo, + output.data(), + 1); + + result.output = MakeTensor<T, 3>(outputTensorInfo, output); + result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, + + // Batch 0, Channel 3 + 7.0f, 8.0f, + + // Batch 0, Channel 4 + 9.0f, 10.0f, + + // Batch 0, Channel 5 + 11.0f, 12.0f, + + // Batch 0, Channel 6 + 25.0f, 26.0f, + + // Batch 0, Channel 7 + 17.0f, 18.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f, + + // Batch 1, Channel 3 + 27.0f, 28.0f, + + // Batch 1, Channel 4 + 29.0f, 30.0f, + + // Batch 1, Channel 5 + 13.0f, 14.0f, + + // Batch 1, Channel 6 + 15.0f, 16.0f, + + // Batch 1, Channel 7 + 31.0f, 32.0f, + })); + + return result; +} + +LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0); +} + +template <typename T> +LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, + int32_t qOffset) +{ + armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>()); + auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f + })); + + armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>()); + auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 7.0f, + + // Batch 0, Channel 1 + 9.0f, + + // Batch 0, Channel 2 + 11.0f, + + // Batch 1, Channel 0 + 25.0f, + + // Batch 1, Channel 1 + 27.0f, + + // Batch 1, Channel 2 + 29.0f + })); + + armnn::TensorInfo 
input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>()); + auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 13.0f, 14.0f, 50.0f, + + // Batch 0, Channel 1 + 15.0f, 16.0f, 51.0f, + + // Batch 0, Channel 2 + 17.0f, 18.0f, 52.0f, + + // Batch 1, Channel 0 + 31.0f, 32.0f, 53.0f, + + // Batch 1, Channel 1 + 33.0f, 34.0f, 54.0f, + + // Batch 1, Channel 2 + 35.0f, 36.0f, 55.0f, + })); + + armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>()); + LayerTestResult<T, 3> result(outputTensorInfo); + + std::vector<T> output; + output.resize(outputTensorInfo.GetNumElements()); + Concatenate(workloadFactory, + { input0TensorInfo, input1TensorInfo, input2TensorInfo }, + { input0.data(), input1.data(), input2.data() }, + outputTensorInfo, + output.data(), + 2); + + result.output = MakeTensor<T, 3>(outputTensorInfo, output); + result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f, + + // Batch 0, Channel 1 + 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f, + + // Batch 0, Channel 2 + 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f, + + // Batch 1, Channel 0 + 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f, + + // Batch 1, Channel 1 + 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f, + + // Batch 1, Channel 2 + 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f, + })); + + return result; +} + +LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0); +} + +LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 4; + constexpr unsigned int inputHeight = 4; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = inputWidth; + constexpr unsigned int outputHeight = inputHeight; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::Float32); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 1.0f, 2.0f, 3.0f, 4.0f, + 2.0f, 3.0f, 4.0f, 5.0f, + 3.0f, 4.0f, 5.0f, 6.0f, + 4.0f, 5.0f, 6.0f, 7.0f + })); + + LayerTestResult<float, 4> result(outputTensorInfo); + result.outputExpected = input; + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& 
workloadFactory) +{ + constexpr unsigned int inputWidth = 2; + constexpr unsigned int inputHeight = 2; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = inputWidth / 2; + constexpr unsigned int outputHeight = inputHeight / 2; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::Float32); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 1.0f, 255.0f, + 200.0f, 250.f, + })); + + // The 'resize bilinear' operation projects the top-left corner of output texels into the input image, + // then figures out the interpolants and weights. Note this is different to projecting the centre of the + // output texel - and thus we'll expect the output 1x1 matrix to contain as its single element the value + // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting + // the centre). + LayerTestResult<float, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({ + 1.0f + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 4; + constexpr unsigned int inputHeight = 4; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = inputWidth / 2; + constexpr unsigned int outputHeight = inputHeight / 2; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::Float32); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 1.0f, 2.0f, 3.0f, 4.0f, + 2.0f, 3.0f, 4.0f, 5.0f, + 3.0f, 4.0f, 5.0f, 6.0f, + 4.0f, 5.0f, 6.0f, 7.0f + })); + + LayerTestResult<float, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({ + 1.f, 3.f, + 3.f, 5.f + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = 
workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 5; + constexpr unsigned int inputHeight = 3; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = 3; + constexpr unsigned int outputHeight = 2; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::Float32); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 1.0f, 2.0f, 3.0f, 5.0f, 8.0f, + 13.0f, 21.0f, 34.0f, 55.0f, 89.0f, + 144.0f, 233.0f, 377.0f, 610.0f, 987.0f + })); + + LayerTestResult<float, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({ + 1.0f, 2.6666f, 6.0f, + 78.5f, 179.3333f, 401.f + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 2; + constexpr unsigned int inputHeight = 3; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = 5; + constexpr unsigned int outputHeight = 3; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::Float32); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 1.0f, 2.0f, + 13.0f, 21.0f, + 144.0f, 233.0f + })); + + LayerTestResult<float, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<float, 
4>(outputTensorInfo, std::vector<float>({ + 1.0f, 1.4f, 1.8f, 2.f, 2.f, + 13.f, 16.2f, 19.4f, 21.f, 21.f, + 144.f, 179.6f, 215.2f, 233.f, 233.f + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int width = 2; + constexpr unsigned int height = 3; + + const armnn::TensorInfo tensorInfo({height, width }, + armnn::DataType::Float32); + auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({ + -10.0f, -5.0f, + 0.0f, 5.0f, + 10.0f, 10.0f + })); + + LayerTestResult<float, 2> ret(tensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo); + + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo); + + armnn::FakeQuantizationQueueDescriptor data; + armnn::WorkloadInfo info; + + AddInputToWorkload(data, info, tensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, tensorInfo, outputHandle.get()); + float min = -10.f; + float max = 10.f; + + data.m_Parameters.m_Min = min; + data.m_Parameters.m_Max = max; + + armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]); + armnn::FakeQuantizationQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + + ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({ + 0.0f, 63.0f, + 128.0f, 191.0f, + 255.0f, 255.0f + })); + return ret; +} + +LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 1; + constexpr unsigned int inputHeight = 1; + constexpr unsigned int inputChannels = 10; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = inputWidth; + constexpr unsigned int outputHeight = inputHeight; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::Float32); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f + })); + + const float 
approxInvL2Norm = 0.050964719f; + LayerTestResult<float, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 1.0f * approxInvL2Norm, + 2.0f * approxInvL2Norm, + 3.0f * approxInvL2Norm, + 4.0f * approxInvL2Norm, + 5.0f * approxInvL2Norm, + 6.0f * approxInvL2Norm, + 7.0f * approxInvL2Norm, + 8.0f * approxInvL2Norm, + 9.0f * approxInvL2Norm, + 10.0f * approxInvL2Norm + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::L2NormalizationQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +namespace +{ + +float CalcInvL2Norm(std::initializer_list<float> elements) +{ + const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f, + [](float acc, float element) { return acc + element * element; }); + return 1.0f / sqrtf(reduction); +} + +} + +LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 5; + constexpr unsigned int inputHeight = 1; + constexpr unsigned int inputChannels = 2; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = inputWidth; + constexpr unsigned int outputHeight = inputHeight; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::Float32); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 1.0f, 3.0f, 5.0f, 7.0f, 9.0f, + 2.0f, 4.0f, 6.0f, 8.0f, 10.0f + })); + + LayerTestResult<float, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }), + 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }), + 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }), + 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }), + 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }), + + 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }), + 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }), + 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }), + 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }), + 10.0f * CalcInvL2Norm({ 9.0f, 10.0f }) + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::L2NormalizationQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info); + + 
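// As encoded in the expected data above, L2 normalization here acts across the channel dimension: every + // element is scaled by 1 / sqrt(sum over channels of the squared values at the same (n, h, w) position), + // which is exactly what the CalcInvL2Norm helper computes. +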
inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 3; + constexpr unsigned int inputHeight = 4; + constexpr unsigned int inputChannels = 2; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = inputWidth; + constexpr unsigned int outputHeight = inputHeight; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::Float32); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + // Channel 0 + 119.0f, 21.0f, 150.0f, + 149.0f, 32.0f, 179.0f, + 15.0f, 227.0f, 141.0f, + 147.0f, 199.0f, 220.0f, + + // Channel 1 + 110.0f, 140.0f, 73.0f, + 211.0f, 212.0f, 89.0f, + 24.0f, 138.0f, 188.0f, + 162.0f, 12.0f, 161.0f, + })); + + LayerTestResult<float, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }), + 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }), + 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }), + 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }), + 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }), + 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }), + 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }), + 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }), + 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }), + 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }), + 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }), + 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }), + + 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }), + 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }), + 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }), + 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }), + 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }), + 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }), + 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }), + 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }), + 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }), + 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }), + 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }), + 161.0f * CalcInvL2Norm({ 220.0f, 161.0f }), + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::L2NormalizationQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 3; + constexpr unsigned int inputHeight = 4; + constexpr 
unsigned int inputChannels = 3; + constexpr unsigned int inputBatchSize = 2; + + constexpr unsigned int outputWidth = inputWidth; + constexpr unsigned int outputHeight = inputHeight; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::Float32); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + // Batch 0, Channel 0 + 235.0f, 46.0f, 178.0f, + 100.0f, 123.0f, 19.0f, + 172.0f, 74.0f, 250.0f, + 6.0f, 195.0f, 80.0f, + + // Batch 0, Channel 1 + 113.0f, 95.0f, 202.0f, + 77.0f, 114.0f, 71.0f, + 122.0f, 246.0f, 166.0f, + 82.0f, 28.0f, 37.0f, + + // Batch 0, Channel 2 + 56.0f, 170.0f, 162.0f, + 194.0f, 89.0f, 254.0f, + 12.0f, 209.0f, 200.0f, + 1.0f, 64.0f, 54.0f, + + // Batch 1, Channel 0 + 67.0f, 90.0f, 49.0f, + 7.0f, 163.0f, 18.0f, + 25.0f, 117.0f, 103.0f, + 247.0f, 59.0f, 189.0f, + + // Batch 1, Channel 1 + 239.0f, 104.0f, 199.0f, + 17.0f, 124.0f, 153.0f, + 222.0f, 217.0f, 75.0f, + 32.0f, 126.0f, 21.0f, + + // Batch 1, Channel 2 + 97.0f, 145.0f, 215.0f, + 115.0f, 116.0f, 238.0f, + 226.0f, 16.0f, 132.0f, + 92.0f, 125.0f, 88.0f, + })); + + LayerTestResult<float, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + + // Batch 0, Channel 0 + 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }), + 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }), + 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }), + 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }), + 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }), + 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }), + 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }), + 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }), + 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }), + 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }), + 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }), + 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }), + + // Batch 0, Channel 1 + 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }), + 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }), + 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }), + 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }), + 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }), + 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }), + 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }), + 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }), + 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }), + 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }), + 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }), + 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }), + + // Batch 0, Channel 2 + 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }), + 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }), + 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }), + 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }), + 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }), + 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }), + 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }), + 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }), + 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }), + 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }), + 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }), + 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }), + + // Batch 1, Channel 0 + 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f 
}), + 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }), + 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }), + 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }), + 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }), + 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }), + 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }), + 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }), + 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }), + 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }), + 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }), + 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }), + + // Batch 1, Channel 1 + 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }), + 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }), + 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }), + 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }), + 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }), + 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }), + 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }), + 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }), + 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }), + 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }), + 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }), + 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }), + + // Batch 1, Channel 2 + 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }), + 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }), + 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }), + 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }), + 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }), + 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }), + 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }), + 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }), + 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }), + 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }), + 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }), + 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }), + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::L2NormalizationQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +template <typename T> +LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory, + float qScale, + int32_t qOffset) +{ + constexpr unsigned int inputWidth = 3; + constexpr unsigned int inputHeight = 4; + constexpr unsigned int inputChannels = 3; + constexpr unsigned int inputBatchSize = 2; + + constexpr unsigned int outputWidth = inputWidth; + constexpr unsigned int outputHeight = inputHeight; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::GetDataType<T>()); + + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type 
is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + // Batch 0, Channel 0 + 235.0f, 46.0f, 178.0f, + 100.0f, 123.0f, 19.0f, + 172.0f, 74.0f, 250.0f, + 6.0f, 195.0f, 80.0f, + + // Batch 0, Channel 1 + 113.0f, 95.0f, 202.0f, + 77.0f, 114.0f, 71.0f, + 122.0f, 246.0f, 166.0f, + 82.0f, 28.0f, 37.0f, + + // Batch 0, Channel 2 + 56.0f, 170.0f, 162.0f, + 194.0f, 89.0f, 254.0f, + 12.0f, 209.0f, 200.0f, + 1.0f, 64.0f, 54.0f, + + // Batch 1, Channel 0 + 67.0f, 90.0f, 49.0f, + 7.0f, 163.0f, 18.0f, + 25.0f, 117.0f, 103.0f, + 247.0f, 59.0f, 189.0f, + + // Batch 1, Channel 1 + 239.0f, 104.0f, 199.0f, + 17.0f, 124.0f, 153.0f, + 222.0f, 217.0f, 75.0f, + 32.0f, 126.0f, 21.0f, + + // Batch 1, Channel 2 + 97.0f, 145.0f, 215.0f, + 115.0f, 116.0f, 238.0f, + 226.0f, 16.0f, 132.0f, + 92.0f, 125.0f, 88.0f, + }))); + + LayerTestResult<T, 4> result(outputTensorInfo); + result.outputExpected = input; + + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo); + AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]); + + armnn::ConstantQueueDescriptor descriptor; + descriptor.m_LayerOutput = &constantTensor; + + armnn::WorkloadInfo info; + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info); + + outputHandle->Allocate(); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory) +{ + return ConstantTestImpl<float>(workloadFactory, 0.0f, 0); +} + +LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory) +{ + return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0); +} + +LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + unsigned int outputWidth = 5; + unsigned int outputHeight = 6; + unsigned int outputChannels = 3; + + unsigned int inputWidth1 = 2; + unsigned int inputHeight1 = 2; + unsigned int inputChannels1 = 3; + + unsigned int inputWidth2 = 2; + unsigned int inputHeight2 = 4; + unsigned int inputChannels2 = 3; + + unsigned int inputWidth3 = 3; + unsigned int inputHeight3 = 6; + unsigned int inputChannels3 = 2; + + unsigned int inputWidth4 = 3; + unsigned int inputHeight4 = 6; + unsigned int inputChannels4 = 1; + + // Define the tensor descriptors + armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8); + armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8); + armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8); + armnn::TensorInfo inputTensorInfo3({ inputChannels3, inputHeight3, inputWidth3 }, armnn::DataType::QuantisedAsymm8); + armnn::TensorInfo inputTensorInfo4({ inputChannels4, inputHeight4, inputWidth4 }, armnn::DataType::QuantisedAsymm8); + + // Arbitrary scale and offsets. 
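// Editorial sketch (added for clarity, not part of the original patch): throughout these uint8 tests,
// QuantisedAsymm8 data is assumed to follow the usual asymmetric mapping in which a stored byte q
// represents approximately scale * (q - offset). The "dequantized values" comments in the addition and
// multiplication tests below follow exactly this rule. Extracted into a translation unit with
// <cstdint>, <cmath> and <algorithm> available, the mapping could be written as:
//
//     float Dequantize(uint8_t q, float scale, int32_t offset)
//     {
//         return scale * (static_cast<int32_t>(q) - offset);
//     }
//
//     uint8_t Quantize(float value, float scale, int32_t offset)
//     {
//         const int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
//         return static_cast<uint8_t>(std::min(255, std::max(0, q))); // Clamp to the uint8 range.
//     }
//
// For example, with scale 7.0f and offset 3 (used by AdditionUint8Test below), the byte 63 represents
// 7 * (63 - 3) = 420, and a real-valued result of 2065 saturates to 255 because 2065 / 7 + 3 exceeds 255.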
They don't really matter as the merger operator doesn't dequantize/quantize + const float scale = 0.13497836f; + const int32_t offset = -7; + + outputTensorInfo.SetQuantizationScale(scale); + outputTensorInfo.SetQuantizationOffset(offset); + inputTensorInfo1.SetQuantizationScale(scale); + inputTensorInfo1.SetQuantizationOffset(offset); + inputTensorInfo2.SetQuantizationScale(scale); + inputTensorInfo2.SetQuantizationOffset(offset); + inputTensorInfo3.SetQuantizationScale(scale); + inputTensorInfo3.SetQuantizationOffset(offset); + inputTensorInfo4.SetQuantizationScale(scale); + inputTensorInfo4.SetQuantizationOffset(offset); + + LayerTestResult<uint8_t, 3> ret(outputTensorInfo); + + ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>( + { + 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, + + 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, + + 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, + }) + ); + + + auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>( + { + 1, 2, + 6, 7, + + 31, 32, + 36, 37, + + 61, 62, + 66, 67, + }) + ); + + auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>( + { + 11, 12, + 16, 17, + 21, 22, + 26, 27, + + 41, 42, + 46, 47, + 51, 52, + 56, 57, + + 71, 72, + 76, 77, + 81, 82, + 86, 87, + }) + ); + + auto input3 = MakeTensor<uint8_t, 3>(inputTensorInfo3, std::vector<uint8_t>( + { + 3, 4, 5, + 8, 9, 10, + 13, 14, 15, + 18, 19, 20, + 23, 24, 25, + 28, 29, 30, + + 33, 34, 35, + 38, 39, 40, + 43, 44, 45, + 48, 49, 50, + 53, 54, 55, + 58, 59, 60, + }) + ); + + + auto input4 = MakeTensor<uint8_t, 3>(inputTensorInfo4, std::vector<uint8_t>( + { + 63, 64, 65, + 68, 69, 70, + 73, 74, 75, + 78, 79, 80, + 83, 84, 85, + 88, 89, 90, + }) + ); + + std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //extent of the window is defined by size of input[0] + armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1); + + std::vector<unsigned int> wOrigin2 = { 0, 2, 0 }; //extent of the window is defined by size of input[1] + armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2); + + std::vector<unsigned int> wOrigin3 = { 0, 0, 2 }; //extent of the window is defined by size of input[2] + armnn::MergerQueueDescriptor::ViewOrigin window3(wOrigin3); + + std::vector<unsigned int> wOrigin4 = { 2, 0, 2 }; //extent of the window is defined by size of input[3] + armnn::MergerQueueDescriptor::ViewOrigin window4(wOrigin4); + + + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + bool subTensorsSupported = workloadFactory.SupportsSubTensors(); + + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) : + workloadFactory.CreateTensorHandle(inputTensorInfo1); + + std::unique_ptr<armnn::ITensorHandle> inputHandle2 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) : + workloadFactory.CreateTensorHandle(inputTensorInfo2); + + std::unique_ptr<armnn::ITensorHandle> inputHandle3 = + subTensorsSupported ? 
+ workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo3.GetShape(), wOrigin3.data()) : + workloadFactory.CreateTensorHandle(inputTensorInfo3); + + std::unique_ptr<armnn::ITensorHandle> inputHandle4 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo4.GetShape(), wOrigin4.data()) : + workloadFactory.CreateTensorHandle(inputTensorInfo4); + + + armnn::MergerQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); + AddInputToWorkload(data, info, inputTensorInfo3, inputHandle3.get()); + AddInputToWorkload(data, info, inputTensorInfo4, inputHandle4.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + data.m_ViewOrigins.push_back(window1); + data.m_ViewOrigins.push_back(window2); + data.m_ViewOrigins.push_back(window3); + data.m_ViewOrigins.push_back(window4); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info); + + inputHandle1->Allocate(); + inputHandle2->Allocate(); + inputHandle3->Allocate(); + inputHandle4->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]); + CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]); + CopyDataToITensorHandle(inputHandle3.get(), &input3[0][0][0]); + CopyDataToITensorHandle(inputHandle4.get(), &input4[0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); + + return ret; +} + +LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + unsigned int batchSize = 1; + unsigned int channels = 2; + unsigned int height = 2; + unsigned int width = 3; + + const float scale = 7.0f; + const int32_t offset = 3; + + armnn::TensorInfo inputTensorInfo1, inputTensorInfo2; + armnn::TensorInfo outputTensorInfo; + + const unsigned int shape[] = { batchSize, channels, height, width }; + inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8); + inputTensorInfo1.SetQuantizationScale(scale); + inputTensorInfo1.SetQuantizationOffset(offset); + + inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8); + inputTensorInfo2.SetQuantizationScale(scale); + inputTensorInfo2.SetQuantizationOffset(offset); + + outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationScale(scale); + outputTensorInfo.SetQuantizationOffset(offset); + + // See dequantized values to the right + auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>( + { + 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763 + 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616 + })); + + // See dequantized values to the right + auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>( + { + 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449 + 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861 + })); + + // See dequantized values to the right + LayerTestResult<uint8_t, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>( + { + 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped) + 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477 + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = 
workloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::AdditionQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info); + + inputHandle1->Allocate(); + inputHandle2->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); + CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + + return result; +} + +LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + unsigned int batchSize = 1; + unsigned int channels = 2; + unsigned int height = 2; + unsigned int width = 3; + + armnn::TensorInfo inputTensorInfo1, inputTensorInfo2; + armnn::TensorInfo outputTensorInfo; + + const unsigned int shape[] = { batchSize, channels, height, width }; + inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8); + inputTensorInfo1.SetQuantizationScale(4.0f); + inputTensorInfo1.SetQuantizationOffset(1); + + inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8); + inputTensorInfo2.SetQuantizationScale(3.0f); + inputTensorInfo2.SetQuantizationOffset(-2); + + outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationScale(1366.255f); // Scale/offset chosen to have output values out of range + outputTensorInfo.SetQuantizationOffset(-5); + + // See dequantized values to the right + auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>( + { + 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440, + 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120 + })); + + // See dequantized values to the right + auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>( + { + 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747, + 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297 + })); + + // See dequantized values to the right + LayerTestResult<uint8_t, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>( + { + 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680, + 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640 + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); + std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::MultiplicationQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); + AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = 
workloadFactory.CreateMultiplication(data, info); + + inputHandle1->Allocate(); + inputHandle2->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]); + CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + + return result; +} + +LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 4; + constexpr unsigned int inputHeight = 4; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = inputWidth; + constexpr unsigned int outputHeight = inputHeight; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::QuantisedAsymm8); + inputTensorInfo.SetQuantizationScale(1.5f); + inputTensorInfo.SetQuantizationOffset(-3); + + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationScale(1.5f); + outputTensorInfo.SetQuantizationOffset(-3); + + auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({ + 1, 2, 3, 4, + 2, 3, 4, 5, + 3, 4, 5, 6, + 4, 5, 6, 7 + })); + + LayerTestResult<uint8_t, 4> result(outputTensorInfo); + result.outputExpected = input; + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 2; + constexpr unsigned int inputHeight = 2; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = inputWidth / 2; + constexpr unsigned int outputHeight = inputHeight / 2; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::QuantisedAsymm8); + inputTensorInfo.SetQuantizationScale(0.1567f); + inputTensorInfo.SetQuantizationOffset(1); + + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationScale(0.1567f); + outputTensorInfo.SetQuantizationOffset(1); + + auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({ + 1, 255, + 200, 250 + })); + + // The 'resize bilinear' operation projects the top-left corner of output 
texels into the input image, + // then figures out the interpolants and weights. Note this is different to projecting the centre of the + // output texel - and thus we'll expect the output 1x1 matrix to contain as its single element the value + // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting + // the centre). + LayerTestResult<uint8_t, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({ + 1 + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 4; + constexpr unsigned int inputHeight = 4; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = inputWidth / 2; + constexpr unsigned int outputHeight = inputHeight / 2; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::QuantisedAsymm8); + inputTensorInfo.SetQuantizationScale(3.141592f); + inputTensorInfo.SetQuantizationOffset(3); + + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationScale(3.141592f); + outputTensorInfo.SetQuantizationOffset(3); + + auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({ + 1, 2, 3, 4, + 2, 3, 4, 5, + 3, 4, 5, 6, + 4, 5, 6, 7 + })); + + LayerTestResult<uint8_t, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({ + 1, 3, + 3, 5 + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned 
int inputWidth = 3; + constexpr unsigned int inputHeight = 2; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = 2; + constexpr unsigned int outputHeight = 1; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::QuantisedAsymm8); + inputTensorInfo.SetQuantizationScale(1.5f); + inputTensorInfo.SetQuantizationOffset(-1); + + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationScale(1.5f); + outputTensorInfo.SetQuantizationOffset(-1); + + auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({ + 1, 2, 3, // 3.0, 4.5, 6.0 + 5, 8, 13 // 9.0, 13.5, 21.0 + })); + + LayerTestResult<uint8_t, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({ + 1, 3 // 3.0, 5.25 + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + constexpr unsigned int inputWidth = 2; + constexpr unsigned int inputHeight = 3; + constexpr unsigned int inputChannels = 1; + constexpr unsigned int inputBatchSize = 1; + + constexpr unsigned int outputWidth = 5; + constexpr unsigned int outputHeight = 3; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputBatchSize = inputBatchSize; + + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::DataType::QuantisedAsymm8); + inputTensorInfo.SetQuantizationScale(0.010765f); + inputTensorInfo.SetQuantizationOffset(7); + + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationScale(0.010132f); + outputTensorInfo.SetQuantizationOffset(-18); + + auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({ + 24, 228, // 0.183005, 2.379065, + 105, 128, // 1.05497, 1.302565 + 230, 71 // 2.400595, 0.68896 + })); + + LayerTestResult<uint8_t, 4> result(outputTensorInfo); + result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({ + 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504 + 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498 + 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002 + })); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = 
workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeBilinearQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory) +{ + auto ret = BatchNormTestImpl<float>(workloadFactory, 0.f, 0); + return ret; +} + +LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + auto ret = BatchNormTestImpl<uint8_t>(workloadFactory, 1.f/20.f, 50); + return ret; +} + +LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1); +} + +LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1); +} + +LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding) +{ + return 
SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding); +} + +LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding) +{ + return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5); +} + +LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding) +{ + return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding); +} + +LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding) +{ + return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128); +} + +LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory) +{ + return SimpleAveragePooling2dTestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1); +} + +LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory) +{ + return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1); +} + +LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory) +{ + return SimpleL2Pooling2dTestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory) +{ + return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory) +{ + return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory) +{ + return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory) +{ + return L2Pooling2dSize7TestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory) +{ + return L2Pooling2dSize9TestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory) 
+{ + return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory) +{ + return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::PoolingAlgorithm poolingType) +{ + return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType); +} + +LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::PoolingAlgorithm poolingType) +{ + return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128); +} + +LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory, + bool transposeWeights) +{ + return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights); +} + +LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5); +} + +LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5); +} + +LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test( + armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& 
workloadFactory) +{ + return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory); +} + +LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory); +} + +LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory) +{ + return SimplePermuteFloat32TestCommon(workloadFactory); +}; + +LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + return SimplePermuteUint8TestCommon(workloadFactory); +}; diff --git a/src/armnn/backends/test/LayerTests.hpp b/src/armnn/backends/test/LayerTests.hpp new file mode 100644 index 0000000000..fc0c9c7b14 --- /dev/null +++ b/src/armnn/backends/test/LayerTests.hpp @@ -0,0 +1,305 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "armnn/ArmNN.hpp" +#include "armnn/Tensor.hpp" + +#include <boost/multi_array.hpp> +#include <boost/assert.hpp> +#include <array> + +// Layer callables + +namespace armnn +{ +class IWorkloadFactory; +} + +template <std::size_t n> +boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo) +{ + BOOST_ASSERT_MSG(n == tensorInfo.GetNumDimensions(), + "Attempting to construct a shape array of mismatching size"); + + boost::array<unsigned int, n> shape; + for (unsigned int i = 0; i < n; i++) + { + shape[i] = tensorInfo.GetShape()[i]; + } + return shape; +} + +template <typename T, std::size_t n> +struct LayerTestResult +{ + LayerTestResult(const armnn::TensorInfo& outputInfo) + { + auto shape( GetTensorShapeAsArray<n>(outputInfo) ); + output.resize(shape); + outputExpected.resize(shape); + supported = true; + } + + boost::multi_array<T, n> output; + boost::multi_array<T, n> outputExpected; + bool supported; +}; + +LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled); + +LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled); + +LayerTestResult<float, 4> +Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory); + + +LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled); +LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled); + +LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled); + +LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled); + +LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding); +LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding); +LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding); +LayerTestResult<uint8_t, 4> 
SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding ); +LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test( + armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> 
ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::PoolingAlgorithm poolingType); +LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::PoolingAlgorithm poolingType); + +LayerTestResult<float, 4> ConstantLinearActivationTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta); +LayerTestResult<uint8_t, 2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta); + +LayerTestResult<float, 4> SimpleSigmoidTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> SimpleReshapeFloat32Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> SimpleReshapeUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> SimpleFloorTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory); + +template<typename T> +LayerTestResult<T, 4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory); + +LayerTestResult<float, 4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::NormalizationAlgorithmChannel normChannel, + armnn::NormalizationAlgorithmMethod normMethod); + +LayerTestResult<float, 2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, float beta); + +LayerTestResult<float, 2> FullyConnectedFloat32Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled, + bool transposeWeights); + +std::vector<LayerTestResult<float, 3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 3> MergerTest(armnn::IWorkloadFactory& workloadFactory); + 
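// Editorial sketch (added for clarity, not part of the original header): each function declared here
// returns a LayerTestResult holding both the computed output and the expected output, so a caller only
// needs to run the test against a concrete workload factory and compare the two multi_arrays. Assuming
// the CompareTensors helper from test/TensorHelpers.hpp that the other test files in this patch use, a
// minimal Boost.Test consumer might look like:
//
//     armnn::RefWorkloadFactory workloadFactory;   // Or ClWorkloadFactory / NeonWorkloadFactory.
//     LayerTestResult<float, 3> res = MergerTest(workloadFactory);
//     BOOST_TEST(res.supported);
//     BOOST_TEST(CompareTensors(res.output, res.outputExpected));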
+LayerTestResult<float, 4> AdditionTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory); + +LayerTestResult<float, 4> CompareActivationTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::ActivationFunction f, + unsigned int batchSize); + +LayerTestResult<float, 4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory); + +LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory); + +LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> CompareBoundedReLuTest(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + float upperBound, + float lowerBound); + +// Tests that the output should be identical to the input when the output dimensions match the input ones +LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory); + +// Tests the behaviour of the resize bilinear operation when rescaling a 2x2 image into a 1x1 image +LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory); + +// Tests resize bilinear for minification of a square input matrix (also: input dimensions are a +// multiple of output dimensions) +LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory); + +// Tests resize bilinear for minification (output dimensions smaller than input dimensions) +LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory); + +// Tests resize bilinear for magnification (output dimensions bigger than input dimensions) +LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 4> BoundedReLuUint8Test(armnn::IWorkloadFactory& workloadFactory, float upperBound); +LayerTestResult<uint8_t, 4> 
BoundedReLuUint8Test(armnn::IWorkloadFactory& workloadFactory, + float upperBound, + float lowerBound); + +LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled); + +std::vector<LayerTestResult<uint8_t, 3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 4> CompareActivationUint8Test(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::ActivationFunction f); + +LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + float beta); + +LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled); + +LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled); + +LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled); + +LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory, + bool biasEnabled); + +LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory); + +LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 3> 
Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory); + + +LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory, + bool transposeWeights); +LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory); + diff --git a/src/armnn/backends/test/MemCopyTests.cpp b/src/armnn/backends/test/MemCopyTests.cpp new file mode 100644 index 0000000000..8e4dae35f2 --- /dev/null +++ b/src/armnn/backends/test/MemCopyTests.cpp @@ -0,0 +1,156 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> +#include <boost/multi_array.hpp> + +#include "armnn/ArmNN.hpp" +#include "backends/RefWorkloadFactory.hpp" +#if ARMCOMPUTECL_ENABLED +#include "backends/ClWorkloadFactory.hpp" +#endif +#if ARMCOMPUTENEON_ENABLED +#include "backends/NeonWorkloadFactory.hpp" +#endif +#include "backends/CpuTensorHandle.hpp" +#include "test/TensorHelpers.hpp" + +#include "TensorCopyUtils.hpp" +#include "WorkloadTestUtils.hpp" + +BOOST_AUTO_TEST_SUITE(MemCopyTestSuite) + +void MemCopyTest(armnn::IWorkloadFactory& srcWorkloadFactory, armnn::IWorkloadFactory& dstWorkloadFactory, + bool withSubtensors) +{ + const std::array<unsigned int, 4> shapeData = { 1u, 1u, 6u, 5u }; + const armnn::TensorShape tensorShape(4, shapeData.data()); + const armnn::TensorInfo tensorInfo(tensorShape, armnn::DataType::Float32); + boost::multi_array<float, 4> inputData = MakeTensor<float, 4>(tensorInfo, std::vector<float>( + { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + + 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, + + 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, + + 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, + + 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, + + 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, + }) + ); + + boost::multi_array<float, 4> outputData(shapeData); + + auto inputTensorHandle = srcWorkloadFactory.CreateTensorHandle(tensorInfo); + auto outputTensorHandle = dstWorkloadFactory.CreateTensorHandle(tensorInfo); + + AllocateAndCopyDataToITensorHandle(inputTensorHandle.get(), inputData.data()); + outputTensorHandle->Allocate(); + + armnn::MemCopyQueueDescriptor memCopyQueueDesc; + armnn::WorkloadInfo workloadInfo; + + const unsigned int origin[4] = {}; + + auto workloadInput = (withSubtensors && srcWorkloadFactory.SupportsSubTensors()) + ? srcWorkloadFactory.CreateSubTensorHandle(*inputTensorHandle, tensorShape, origin) + : std::move(inputTensorHandle); + auto workloadOutput = (withSubtensors && dstWorkloadFactory.SupportsSubTensors()) + ? 
dstWorkloadFactory.CreateSubTensorHandle(*outputTensorHandle, tensorShape, origin) + : std::move(outputTensorHandle); + + AddInputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadInput.get()); + AddOutputToWorkload(memCopyQueueDesc, workloadInfo, tensorInfo, workloadOutput.get()); + + dstWorkloadFactory.CreateMemCopy(memCopyQueueDesc, workloadInfo)->Execute(); + + CopyDataFromITensorHandle(outputData.data(), workloadOutput.get()); + + BOOST_TEST(CompareTensors(inputData, outputData)); +} + +template <typename SrcWorkloadFactory, typename DstWorkloadFactory> +void MemCopyTest(bool withSubtensors) +{ + SrcWorkloadFactory srcWorkloadFactory; + DstWorkloadFactory dstWorkloadFactory; + MemCopyTest(srcWorkloadFactory, dstWorkloadFactory, withSubtensors); +} + +#if ARMCOMPUTECL_ENABLED + +BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpu) +{ + MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory>(false); +} + +BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpu) +{ + MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory>(false); +} + +BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpuWithSubtensors) +{ + MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory>(true); +} + +BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpuWithSubtensors) +{ + MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory>(true); +} + +#endif // ARMCOMPUTECL_ENABLED + +#if ARMCOMPUTENEON_ENABLED + +BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeon) +{ + MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory>(false); +} + +BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu) +{ + MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory>(false); +} + +BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors) +{ + MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory>(true); +} + +BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors) +{ + MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory>(true); +} + +#endif // ARMCOMPUTENEON_ENABLED + +#if ARMCOMPUTECL_ENABLED && ARMCOMPUTENEON_ENABLED + +BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpu) +{ + MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory>(false); +} + +BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon) +{ + MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory>(false); +} + +BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors) +{ + MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory>(true); +} + +BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors) +{ + MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory>(true); +} + +#endif + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/backends/test/NormTestImpl.hpp b/src/armnn/backends/test/NormTestImpl.hpp new file mode 100644 index 0000000000..1f6aadc9df --- /dev/null +++ b/src/armnn/backends/test/NormTestImpl.hpp @@ -0,0 +1,238 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
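+// Shared implementations for the Normalization layer unit tests (SimpleNormalizationTestImpl and CompareNormalizationTestImpl below).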
+// + +#include "armnn/Exceptions.hpp" +#include "armnn/LayerSupport.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" + +LayerTestResult<float,4> SimpleNormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory, + armnn::NormalizationAlgorithmChannel normChannel, + armnn::NormalizationAlgorithmMethod normMethod) +{ + const unsigned int inputHeight = 2; + const unsigned int inputWidth = 2; + const unsigned int inputChannels = 1; + const unsigned int inputNum = 2; + + unsigned int outputHeight = inputHeight; + unsigned int outputWidth = inputWidth; + unsigned int outputChannels = inputChannels; + unsigned int outputNum = inputNum; + + unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth }; + unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth }; + + auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + LayerTestResult<float,4> ret(outputTensorInfo); + + auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({ + // Batch #0 + 1.0f, 2.0f, + 3.0f, 4.0f, + // Batch #1 + 5.0f, 6.0f, + 7.0f, 8.0f + })); + + float alpha = 1.f; + float beta = 1.f; + float kappa = 1.f; + uint32_t normSize = 3; + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::NormalizationQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Parameters.m_NormChannelType = normChannel; + data.m_Parameters.m_NormMethodType = normMethod; + data.m_Parameters.m_NormSize = normSize; + data.m_Parameters.m_Alpha = alpha; + data.m_Parameters.m_Beta = beta; + data.m_Parameters.m_K = kappa; + + armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]); + armnn::NormalizationQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + switch (normMethod) + { + case armnn::NormalizationAlgorithmMethod::LocalBrightness: + { + switch (normChannel) + { + case armnn::NormalizationAlgorithmChannel::Within: + { + // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index. 
+ // Therefore, all output values should equal the inputs, but divided by: + // pow((kappa + (accumulatedScale * alpha)), beta) + // ...where accumulatedScale is the sum of every element squared + float divisor[inputNum]; + for(int i = 0; i < boost::numeric_cast<int>(inputNum); i++) + { + float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] + + input[i][0][0][1]*input[i][0][0][1] + + input[i][0][1][0]*input[i][0][1][0] + + input[i][0][1][1]*input[i][0][1][1]; + divisor[i] = powf((kappa + accumulatedScale * alpha), beta); + } + ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, + std::vector<float>({input[0][0][0][0]/divisor[0], + input[0][0][0][1]/divisor[0], + input[0][0][1][0]/divisor[0], + input[0][0][1][1]/divisor[0], + input[1][0][0][0]/divisor[1], + input[1][0][0][1]/divisor[1], + input[1][0][1][0]/divisor[1], + input[1][0][1][1]/divisor[1]})); + break; + } + case armnn::NormalizationAlgorithmChannel::Across: + { + // When normalising across channels, all output values should equal the inputs, but multiplied by: + // pow((kappa + (accumulatedScale * alpha)), -beta) + // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared + // ...where adjacent channels means within half the normSize for the channel + // The test data has only one channel, so this is simplified below. + std::vector<float> outputVector; + for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n) + { + for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h) + { + for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w) + { + float accumulatedScale = input[n][0][h][w]*input[n][0][h][w]; + float scale = powf((kappa + accumulatedScale * alpha), -beta); + outputVector.push_back(input[n][0][h][w] * scale); + } + } + } + ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector); + break; + } + default: + { + throw armnn::UnimplementedException("Unsupported normalisation channel type, " + "only Across and Within are supported"); + } + } + break; + } + case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough + default: + { + throw armnn::UnimplementedException("Unsupported normalisation method type, " + "only LocalBrightness is supported"); + } + } + + return ret; +} + +LayerTestResult<float,4> CompareNormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::NormalizationAlgorithmChannel normChannel, + armnn::NormalizationAlgorithmMethod normMethod) +{ + constexpr unsigned int inputNum = 5; + constexpr unsigned int inputChannels = 3; + constexpr unsigned int inputHeight = 32; + constexpr unsigned int inputWidth = 24; + + constexpr unsigned int outputNum = inputNum; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputHeight = inputHeight; + constexpr unsigned int outputWidth = inputWidth; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth}; + unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + LayerTestResult<float,4> ret(outputTensorInfo); + + auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234); + + constexpr float alpha = 1.f; + constexpr float beta = 1.f; + constexpr float kappa = 1.f; + 
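+    // Note: with alpha = beta = kappa = 1 the LocalBrightness response reduces to x / (1 + sum of squares in the window),
+    // matching the expected-value derivation in SimpleNormalizationTestImpl above.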
constexpr uint32_t normSize = 5; + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::NormalizationQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Parameters.m_NormChannelType = normChannel; + data.m_Parameters.m_NormMethodType = normMethod; + data.m_Parameters.m_NormSize = normSize; + data.m_Parameters.m_Alpha = alpha; + data.m_Parameters.m_Beta = beta; + data.m_Parameters.m_K = kappa; + + std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo); + + armnn::NormalizationQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get()); + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); + + // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised. + armnn::Compute compute = workloadFactory.GetCompute(); + const size_t reasonIfUnsupportedMaxLen = 255; + char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1]; + ret.supported = armnn::IsNormalizationSupported(compute, inputTensorInfo, outputTensorInfo, data.m_Parameters, + reasonIfUnsupported, reasonIfUnsupportedMaxLen); + if (!ret.supported) + { + return ret; + } + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info); + std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo); + + outputHandleRef->Allocate(); + inputHandleRef->Allocate(); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + + workload->Execute(); + workloadRef->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get()); + + return ret; +} + diff --git a/src/armnn/backends/test/PermuteTestImpl.hpp b/src/armnn/backends/test/PermuteTestImpl.hpp new file mode 100644 index 0000000000..4eafa1a211 --- /dev/null +++ b/src/armnn/backends/test/PermuteTestImpl.hpp @@ -0,0 +1,121 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include <armnn/ArmNN.hpp> +#include <armnn/Tensor.hpp> +#include <armnn/TypesUtils.hpp> +#include <backends/WorkloadInfo.hpp> + +#include "test/TensorHelpers.hpp" +#include "QuantizeHelper.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" + +template<typename T> +LayerTestResult<T, 4> SimplePermuteTestImpl( + armnn::IWorkloadFactory& workloadFactory, + armnn::PermuteDescriptor descriptor, + armnn::TensorInfo inputTensorInfo, + armnn::TensorInfo outputTensorInfo, + const std::vector<T>& inputData, + const std::vector<T>& outputExpectedData) +{ + auto input = MakeTensor<T, 4>(inputTensorInfo, inputData); + + LayerTestResult<T, 4> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::PermuteQueueDescriptor data; + data.m_Parameters = descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +LayerTestResult<float, 4> SimplePermuteFloat32TestCommon(armnn::IWorkloadFactory& workloadFactory) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { 1, 2, 2, 2 }; + unsigned int outputShape[] = { 1, 2, 2, 2 }; + + armnn::PermuteDescriptor descriptor; + descriptor.m_DimMappings = {0U, 3U, 1U, 2U}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + std::vector<float> input = std::vector<float>( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + + 5.0f, 6.0f, + 7.0f, 8.0f + }); + + std::vector<float> outputExpected = std::vector<float>( + { + 1.0f, 5.0f, 2.0f, 6.0f, + 3.0f, 7.0f, 4.0f, 8.0f + }); + + return SimplePermuteTestImpl<float>(workloadFactory, descriptor, inputTensorInfo, + outputTensorInfo, input, outputExpected); +} + +LayerTestResult<uint8_t, 4> SimplePermuteUint8TestCommon(armnn::IWorkloadFactory& workloadFactory) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { 1, 2, 2, 2 }; + unsigned int outputShape[] = { 1, 2, 2, 2 }; + + armnn::PermuteDescriptor descriptor; + descriptor.m_DimMappings = {0U, 3U, 1U, 2U}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::QuantisedAsymm8); + inputTensorInfo.SetQuantizationScale(1.0f); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationScale(1.0f); + + std::vector<uint8_t> input = std::vector<uint8_t>( + { + 1, 2, + 3, 4, + + 5, 6, + 7, 8 + }); + + std::vector<uint8_t> outputExpected = std::vector<uint8_t>( + { + 1, 5, 2, 6, + 3, 7, 4, 8 + }); + + return SimplePermuteTestImpl<uint8_t>(workloadFactory, descriptor, inputTensorInfo, + outputTensorInfo, input, outputExpected); +} diff --git a/src/armnn/backends/test/Pooling2dTestImpl.hpp 
b/src/armnn/backends/test/Pooling2dTestImpl.hpp new file mode 100644 index 0000000000..fc84ddb2ca --- /dev/null +++ b/src/armnn/backends/test/Pooling2dTestImpl.hpp @@ -0,0 +1,1039 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/ArmNN.hpp> +#include <armnn/Tensor.hpp> +#include <armnn/TypesUtils.hpp> +#include <backends/WorkloadInfo.hpp> + +#include "test/TensorHelpers.hpp" +#include "QuantizeHelper.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" + +#include <algorithm> + +template<typename T> +LayerTestResult<T, 4> SimplePooling2dTestImpl( + armnn::IWorkloadFactory& workloadFactory, + armnn::Pooling2dDescriptor descriptor, + float qScale, + int32_t qOffset, + const boost::multi_array<T, 4>& input, + const boost::multi_array<T, 4>& outputExpected) +{ + unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[2]); + unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[3]); + unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[1]); + unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]); + + unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]); + unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]); + unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]); + unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]); + + armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, + armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, + armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + LayerTestResult<T, 4> result(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::Pooling2dQueueDescriptor queueDescriptor; + queueDescriptor.m_Parameters = descriptor; + armnn::WorkloadInfo workloadInfo; + AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get()); + + // Don't execute if Pooling is not supported, as an exception will be raised. 
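+    // Instead, IsPooling2dSupported is queried up front and the test returns early with result.supported == false.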
+ armnn::Compute compute = workloadFactory.GetCompute(); + const size_t reasonIfUnsupportedMaxLen = 255; + char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1]; + result.supported = armnn::IsPooling2dSupported(compute, inputTensorInfo, outputTensorInfo, + queueDescriptor.m_Parameters, + reasonIfUnsupported, reasonIfUnsupportedMaxLen); + if (!result.supported) + { + return result; + } + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + + result.outputExpected = outputExpected; + + return result; +} + +// +// Tests max pooling with the following parameters: +// +// Pooling size: 3x3 +// Stride: (2,4) +// input size: 8x13 +// channels: 2 +// batch size: 2 +// +template<typename T> +LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = 2; + descriptor.m_StrideY = 4; + // forceNoPadding is mainly used for compatibility with ARM Compute. + // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size. + descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3; + descriptor.m_PadTop = descriptor.m_PadBottom = 0; + descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + unsigned int inputWidth = 8; + unsigned int inputHeight = 13; + unsigned int outputWidth = + (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) / + descriptor.m_StrideX; + unsigned int outputHeight = + (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) / + descriptor.m_StrideY; + unsigned int channels = 2; + unsigned int batchSize = 2; + + armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + std::vector<float> singleChannelData({ + 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f, + 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f, + 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f, + 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f, + 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f, + 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f, + 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f, + 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f, + 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f, + 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f, + 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f, + 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f, + 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f, + }); + + // Construct input data + std::vector<float> inputData; + auto negator = [](float f) { return -f; }; + + // First image (two channels where the second channel is the negative of the first one) + inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end()); + std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator); + + // Second image (same as first image) + inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end()); + std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator); + + auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData)); + + // these were calculated manually + auto shape(GetTensorShapeAsArray<4>(outputTensorInfo)); + boost::multi_array<T, 4> outputExpected(shape); + if (forceNoPadding) + { + outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 8.0f, 8.0f, 8.0f, + 9.0f, 7.0f, 9.0f, + 9.0f, 9.0f, 9.0f, + + 0.0f, 0.0f, -3.0f, + -1.0f, 0.0f, 0.0f, + -1.0f, -1.0f, -1.0f, + + 8.0f, 8.0f, 8.0f, + 9.0f, 7.0f, 9.0f, + 9.0f, 9.0f, 9.0f, + + 0.0f, 0.0f, -3.0f, + -1.0f, 0.0f, 0.0f, + -1.0f, -1.0f, -1.0f + })); + } + else + { + outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f, + 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f, + 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f, + + 0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f, + 0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f, + + 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f, + 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f, + 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f, + + 0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f, + 0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f + })); + } + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; + descriptor.m_StrideX = descriptor.m_StrideY = 2; + descriptor.m_PadLeft = 1; + descriptor.m_PadRight = 1; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 1; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + armnn::TensorInfo 
outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 3.0f, 4.0f, + })); + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 2.5f, 4.0f, + 1.0f, 2.5f, 4.0f, + 1.0f, 2.5f, 4.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100; + descriptor.m_StrideX = descriptor.m_StrideY = 5; + descriptor.m_PadLeft = 50; + descriptor.m_PadRight = 50; + descriptor.m_PadTop = 50; + descriptor.m_PadBottom = 50; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + std::vector<T> inputVec; + + for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i) + { + inputVec.push_back(1); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec); + + std::vector<T> outputVec; + + for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i) + { + outputVec.push_back(1); + } + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::L2; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; + descriptor.m_StrideX = descriptor.m_StrideY = 2; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 7.0f, 1.0f, 7.0f, + 1.0f, 7.0f, 1.0f, 7.0f, + 1.0f, 7.0f, 1.0f, 7.0f, + 1.0f, 7.0f, 1.0f, 7.0f, + })); + + armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>()); + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 5.0f, 5.0f, + 5.0f, 5.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> 
L2Pooling2dSize3Stride1TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::L2; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = descriptor.m_StrideY = 1; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 2.0f, 1.0f, 5.0f, 2.0f, + 1.0f, 2.0f, 2.0f, 1.0f, + 5.0f, 4.0f, 1.0f, 5.0f, + 2.0f, 1.0f, 5.0f, 2.0f, + })); + + armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>()); + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 3.0f, 3.0f, + 3.0f, 3.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::L2; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = descriptor.m_StrideY = 3; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>()); + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, + 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, + 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, + 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, + 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, + 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, + 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, + 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, + 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, + })); + + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>()); + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 3.0f, 3.0f, 3.0f, + 3.0f, 3.0f, 3.0f, + 3.0f, 3.0f, 3.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::L2; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = descriptor.m_StrideY = 4; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>()); + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f, + 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f, + 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f, + 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f, + 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f, + })); + + armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>()); + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + 
QuantizedVector<T>(qScale, qOffset, { + 3.0f, 3.0f, + 3.0f, 3.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::L2; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7; + descriptor.m_StrideX = descriptor.m_StrideY = 7; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>()); + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f, + 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f, + 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + })); + + armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>()); + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 3.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::L2; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9; + descriptor.m_StrideX = descriptor.m_StrideY = 9; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>()); + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, + 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, + 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, + 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, + 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, + 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, + 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, + 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, + 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, + })); + + armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>()); + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 3.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>()); + + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; + descriptor.m_PoolWidth = 2; + descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = 2; + descriptor.m_StrideY = 1; + descriptor.m_PadLeft = 2; + descriptor.m_PadRight = 0; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 2; + descriptor.m_OutputShapeRounding = 
armnn::OutputShapeRounding::Floor; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + // Construct input data + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 3.0f, 4.0f, + })); + + // these were calculated manually + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 0.0f, 3.0f, 0.0f, 3.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> ComparePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + armnn::PoolingAlgorithm poolingType, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + const unsigned int inputWidth = 16; + const unsigned int inputHeight = 32; + const unsigned int channelCount = 2; + const unsigned int batchSize = 5; + + const unsigned int poolSize = 3; + const unsigned int strideX = 2; + const unsigned int strideY = 4; + const unsigned int padX = 0; + const unsigned int padY = 0; + + const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX; + const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth }; + unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth }; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>()); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715); + + LayerTestResult<T, 4> comparisonResult(outputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::Pooling2dQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + data.m_Parameters.m_PoolType = poolingType; + data.m_Parameters.m_PoolWidth = poolSize; + data.m_Parameters.m_PoolHeight = poolSize; + data.m_Parameters.m_StrideX = strideX; + data.m_Parameters.m_StrideY = strideY; + data.m_Parameters.m_PadLeft = padX; + data.m_Parameters.m_PadRight = padX; + data.m_Parameters.m_PadTop = padY; + data.m_Parameters.m_PadBottom = padY; + data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor; + + std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo); + + // Don't execute if Pooling is not supported, as an exception will be raised. 
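+    // Only the backend under test is queried here; the reference workload factory is assumed to support every pooling configuration.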
+ armnn::Compute compute = workloadFactory.GetCompute(); + const size_t reasonIfUnsupportedMaxLen = 255; + char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1]; + comparisonResult.supported = armnn::IsPooling2dSupported(compute, inputTensorInfo, outputTensorInfo, + data.m_Parameters, + reasonIfUnsupported, reasonIfUnsupportedMaxLen); + if (!comparisonResult.supported) + { + return comparisonResult; + } + + armnn::Pooling2dQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get()); + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info); + std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo); + + outputHandleRef->Allocate(); + inputHandleRef->Allocate(); + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]); + + workload->Execute(); + workloadRef->Execute(); + + CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get()); + CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get()); + + return comparisonResult; +} + +// +// Tests max pooling with the following parameters: +// +// Pooling size: 2x2 +// Stride: (2,2) +// input size: 4x4 +// channels: 1 +// batch size: 1 +// +template<typename T> +LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(armnn::IWorkloadFactory& workloadFactory, + bool forceNoPadding, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; + descriptor.m_StrideX = 2; + descriptor.m_StrideY = 2; + descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3; + descriptor.m_PadTop = descriptor.m_PadBottom = 0; + descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + + unsigned int inputWidth = 4; + unsigned int inputHeight = 4; + unsigned int outputWidth = + (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) / + descriptor.m_StrideX; + unsigned int outputHeight = + (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) / + descriptor.m_StrideY; + unsigned int channels = 1; + unsigned int batchSize = 1; + + std::vector<float> inputData = { + 510.0f, 222.0f, 780.0f, 654.0f, + 141.0f, 276.0f, 15.0f, 546.0f, + 303.0f, 618.0f, 582.0f, 339.0f, + 438.0f, 564.0f, 573.0f, 402.0f + }; + + // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here + std::vector<float> expectedOutputDataWithPadding = { + 0.0f, 510.0f, 780.0f, 654.0f, 0.0f, + 0.0f, 438.0f, 618.0f, 402.0f, 0.0f + }; + + std::vector<float> expectedOutputDataNoPadding = { + 510.0f, 780.0f, + 618.0f, 582.0f + }; + + armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>()); + + // Scale and offset should match input - we're just calculating maximum values. 
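+    // (Max pooling only selects values already present in the input, so the output needs no requantization.)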
+ armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData)); + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) : + QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding)); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; + descriptor.m_StrideX = descriptor.m_StrideY = 2; + descriptor.m_PadLeft = 1; + descriptor.m_PadRight = 1; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 1; + descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + -1.0f, -2.0f, 3.0f, 4.0f, + -1.0f, -2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, -3.0f, -4.0f, + 1.0f, 2.0f, -3.0f, -4.0f, + })); + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + -1.0f, 3.0f, 4.0f, + 1.0f, 3.0f, 4.0f, + 1.0f, 2.0f, -4.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = descriptor.m_StrideY = 1; + descriptor.m_PadLeft = 1; + descriptor.m_PadRight = 1; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 1; + descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + -1.0f, -2.0f, 3.0f, 4.0f, + -1.0f, -2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, -3.0f, -4.0f, + 1.0f, 2.0f, -3.0f, -4.0f, + })); + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + -1.0f, 3.0f, 4.0f, 4.0f, + 2.0f, 3.0f, 4.0f, 4.0f, + 2.0f, 3.0f, 4.0f, 4.0f, + 2.0f, 2.0f, 2.0f, -3.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; + descriptor.m_StrideX = descriptor.m_StrideY = 2; + descriptor.m_PadLeft = 1; + descriptor.m_PadRight = 1; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 1; + descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 12.0f, 20.0f, 32.0f, 40.0f, + 12.0f, 20.0f, 32.0f, 40.0f, + 12.0f, 20.0f, 32.0f, 40.0f, + 12.0f, 20.0f, 32.0f, 40.0f, + })); + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 3.0f, 13.0f, 10.0f, + 6.0f, 26.0f, 20.0f, + 3.0f, 13.0f, 10.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = descriptor.m_StrideY = 2; + descriptor.m_PadLeft = 0; + descriptor.m_PadRight = 0; + descriptor.m_PadTop = 0; + descriptor.m_PadBottom = 0; + descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; + descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 3.0f, 4.0f, + })); + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 2.0f, 3.5f, + 2.0f, 3.5f + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = descriptor.m_StrideY = 1; + descriptor.m_PadLeft = 1; + descriptor.m_PadRight = 1; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 1; + descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 9.0f, 27.0f, 18.0f, 36.0f, + 18.0f, 9.0f, 18.0f, 9.0f, + 27.0f, 18.0f, 9.0f, 27.0f, + 9.0f, 27.0f, 9.0f, 18.0f, + })); + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 7.0f, 11.0f, 13.0f, 9.0f, + 12.0f, 17.0f, 19.0f, 13.0f, + 12.0f, 16.0f, 16.0f, 10.0f, + 9.0f, 11.0f, 12.0f, 7.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::L2; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; + descriptor.m_StrideX = descriptor.m_StrideY = 2; + descriptor.m_PadLeft = 1; + descriptor.m_PadRight = 1; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 1; + descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. 
+ if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 2.0f, 4.0f, 8.0f, 16.0f, + 4.0f, 2.0f, 2.0f, 4.0f, + 8.0f, 2.0f, 4.0f, 2.0f, + 16.0f, 2.0f, 2.0f, 8.0f, + })); + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 4.4721f, 8.0f, + 4.4721f, 2.6457f, 2.236f, + 8.0f, 1.4142f, 4.0f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} + +template<typename T> +LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 1.0f, + int32_t qOffset = 0) +{ + armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::L2; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3; + descriptor.m_StrideX = descriptor.m_StrideY = 1; + descriptor.m_PadLeft = 1; + descriptor.m_PadRight = 1; + descriptor.m_PadTop = 1; + descriptor.m_PadBottom = 1; + descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + auto input = MakeTensor<T, 4>(inputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 3.0f, 4.0f, + })); + + auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, + QuantizedVector<T>(qScale, qOffset, { + 1.0540f, 1.7638f, 2.5385f, 2.3570f, + 1.2909f, 2.1602f, 3.1091f, 2.8867f, + 1.2909f, 2.1602f, 3.1091f, 2.8867f, + 1.0540f, 1.7638f, 2.5385f, 2.3570f, + })); + + return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); +} diff --git a/src/armnn/backends/test/QuantizeHelper.hpp b/src/armnn/backends/test/QuantizeHelper.hpp new file mode 100644 index 0000000000..bfaf9342f0 --- /dev/null +++ b/src/armnn/backends/test/QuantizeHelper.hpp @@ -0,0 +1,91 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
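+// Helpers for turning float reference data into quantized test vectors; for non-quantized types they pass values through unchanged.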
+// +#pragma once + +#include <armnn/ArmNN.hpp> +#include <armnn/TypesUtils.hpp> + +#include <initializer_list> +#include <iterator> +#include <vector> +#include <boost/core/ignore_unused.hpp> + +template<typename T, bool DoQuantize=true> +struct SelectiveQuantizer +{ + static T Quantize(float value, float scale, int32_t offset) + { + return armnn::Quantize<T>(value, scale, offset); + } + + static float Dequantize(T value, float scale, int32_t offset) + { + return armnn::Dequantize(value, scale, offset); + } +}; + +template<typename T> +struct SelectiveQuantizer<T, false> +{ + static T Quantize(float value, float scale, int32_t offset) + { + boost::ignore_unused(scale, offset); + return value; + } + + static float Dequantize(T value, float scale, int32_t offset) + { + boost::ignore_unused(scale, offset); + return value; + } +}; + +template<typename T> +T SelectiveQuantize(float value, float scale, int32_t offset) +{ + return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Quantize(value, scale, offset); +}; + +template<typename T> +float SelectiveDequantize(T value, float scale, int32_t offset) +{ + return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Dequantize(value, scale, offset); +}; + +template<typename ItType> +struct IsFloatingPointIterator +{ + static constexpr bool value=std::is_floating_point<typename std::iterator_traits<ItType>::value_type>::value; +}; + +template <typename T, typename FloatIt, +typename std::enable_if<IsFloatingPointIterator<FloatIt>::value, int>::type=0 // Make sure valid fp iterator +> +std::vector<T> QuantizedVector(float qScale, int32_t qOffset, FloatIt first, FloatIt last) +{ + std::vector<T> quantized; + quantized.reserve(boost::numeric_cast<size_t>(std::distance(first, last))); + + for (auto it = first; it != last; ++it) + { + auto f = *it; + T q =SelectiveQuantize<T>(f, qScale, qOffset); + quantized.push_back(q); + } + + return quantized; +} + +template<typename T> +std::vector<T> QuantizedVector(float qScale, int32_t qOffset, const std::vector<float>& array) +{ + return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end()); +} + +template<typename T> +std::vector<T> QuantizedVector(float qScale, int32_t qOffset, std::initializer_list<float> array) +{ + return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end()); +} diff --git a/src/armnn/backends/test/Reference.cpp b/src/armnn/backends/test/Reference.cpp new file mode 100644 index 0000000000..87d82f1781 --- /dev/null +++ b/src/armnn/backends/test/Reference.cpp @@ -0,0 +1,231 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
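+// Registers the shared layer tests against the reference (CPU) workload factory.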
+// +#include <boost/test/unit_test.hpp> + +#include "LayerTests.hpp" +#include "test/TensorHelpers.hpp" + +#include "backends/RefWorkloadFactory.hpp" + +#include "test/UnitTests.hpp" + +BOOST_AUTO_TEST_SUITE(Compute_Reference) +using FactoryType = armnn::RefWorkloadFactory; + +// ============================================================================ +// UNIT tests + +// Convolution +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5, SimpleConvolution2d3x5Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x5Uint8, SimpleConvolution2d3x5Uint8Test, true) + +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false) +ARMNN_AUTO_TEST_CASE(UnbiasedConvolutionUint8, SimpleConvolution2d3x5Uint8Test, false) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution1d, Convolution1dTest, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution1dUint8, Convolution1dUint8Test, true) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3, SimpleConvolution2d3x3Test, true) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true) + +ARMNN_AUTO_TEST_CASE(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false) + +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPaddingLargerThanHalfKernelSize, + Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest) +ARMNN_AUTO_TEST_CASE(SimpleConvolution2dAsymmetricPadding, Convolution2dAsymmetricPaddingTest) + +// Depthwise Convolution +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d, DepthwiseConvolution2dTest, true) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, true) + +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2d, DepthwiseConvolution2dTest, false) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dUint8, DepthwiseConvolution2dUint8Test, false) + +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, true) +ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, true) + +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, false) +ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dDepthMul1Uint8, DepthwiseConvolution2dDepthMul1Uint8Test, false) + +// Pooling +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize2x2Stride2x2, SimpleMaxPooling2dSize2x2Stride2x2Test, false) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize2x2Stride2x2Uint8, SimpleMaxPooling2dSize2x2Stride2x2Uint8Test, false) + +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, false) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, false) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test) + +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPadding, IgnorePaddingSimpleAveragePooling2dNoPaddingTest) +ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8, + IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test) 
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test)
+ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
+ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest)
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test)
+ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(AsymmNonSquarePooling2d, AsymmetricNonSquarePooling2dTest)
+ARMNN_AUTO_TEST_CASE(AsymmNonSquarePooling2dUint8, AsymmetricNonSquarePooling2dUint8Test)
+
+// Activation
+ARMNN_AUTO_TEST_CASE(ConstantLinearActivation, ConstantLinearActivationTest)
+ARMNN_AUTO_TEST_CASE(ConstantLinearActivationUint8, ConstantLinearActivationUint8Test)
+
+ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
+ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
+
+ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f)
+ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f)
+ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f)
+ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
+
+ARMNN_AUTO_TEST_CASE(SimpleSigmoid, SimpleSigmoidTest)
+ARMNN_AUTO_TEST_CASE(SimpleSigmoidUint8, SimpleSigmoidUint8Test)
+
+ARMNN_AUTO_TEST_CASE(ReLu1, BoundedReLuUpperAndLowerBoundTest)
+ARMNN_AUTO_TEST_CASE(ReLu6, BoundedReLuUpperBoundOnlyTest)
+ARMNN_AUTO_TEST_CASE(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest)
+ARMNN_AUTO_TEST_CASE(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest)
+
+// Fully Connected
+ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false)
+ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true)
+ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
+
+ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
+
+// Splitter
+BOOST_AUTO_TEST_CASE(SimpleSplitter)
+{
+    armnn::RefWorkloadFactory workloadFactory;
+    auto testResult = SplitterTest(workloadFactory);
+    for (unsigned int i = 0; i < testResult.size(); ++i)
+    {
+        BOOST_TEST(CompareTensors(testResult[i].output, testResult[i].outputExpected));
+    }
+}
+
+BOOST_AUTO_TEST_CASE(SplitterUint8)
+{
+    armnn::RefWorkloadFactory workloadFactory;
+    auto testResult = SplitterUint8Test(workloadFactory);
+    for (unsigned int i = 0; i < testResult.size(); ++i)
+    {
+        BOOST_TEST(CompareTensors(testResult[i].output, testResult[i].outputExpected));
+    }
+}
+
+ARMNN_AUTO_TEST_CASE(CopyViaSplitter, CopyViaSplitterTest)
+ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
+
+// Merger
+ARMNN_AUTO_TEST_CASE(SimpleMerger, MergerTest)
+ARMNN_AUTO_TEST_CASE(MergerUint8, MergerUint8Test)
+
+// Add
+ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
+ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest)
+
+ARMNN_AUTO_TEST_CASE(AdditionUint8, AdditionUint8Test)
+ARMNN_AUTO_TEST_CASE(AddBroadcastUint8, AdditionBroadcastUint8Test)
+ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test)
+
+// Mul
+ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
+ARMNN_AUTO_TEST_CASE(MultiplicationUint8, MultiplicationUint8Test)
+
+// Batch Norm
+ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest)
+ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test)
+
+// Resize Bilinear
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopUint8Test)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinUint8Test)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinUint8Test)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test)
+
+// Fake Quantization
+ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest)
+
+// L2 Normalization
+ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest)
+
+// Constant
+ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
+ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8Test)
+
+// Concat
+ARMNN_AUTO_TEST_CASE(Concatenation1d, Concatenation1dTest)
+ARMNN_AUTO_TEST_CASE(Concatenation1dUint8, Concatenation1dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim0, Concatenation2dDim0Test)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim0Uint8, Concatenation2dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim1, Concatenation2dDim1Test)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim1Uint8, Concatenation2dDim1Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDims, Concatenation2dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim0DiffInputDimsUint8, Concatenation2dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDims, Concatenation2dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_CASE(Concatenation2dDim1DiffInputDimsUint8, Concatenation2dDim1DiffInputDimsUint8Test)
+
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim0, Concatenation3dDim0Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim0Uint8, Concatenation3dDim0Uint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim1, Concatenation3dDim1Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim1Uint8, Concatenation3dDim1Uint8Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim2, Concatenation3dDim2Test)
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim2Uint8, Concatenation3dDim2Uint8Test)
+
+ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDims,
Concatenation3dDim0DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim0DiffInputDimsUint8, Concatenation3dDim0DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDims, Concatenation3dDim1DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim1DiffInputDimsUint8, Concatenation3dDim1DiffInputDimsUint8Test) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffInputDimsTest) +ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test) + +// Floor +ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest) + +// Reshape +ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test) +ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test) + +// Permute +ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test) +ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test) + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/backends/test/ReshapeTestImpl.hpp b/src/armnn/backends/test/ReshapeTestImpl.hpp new file mode 100644 index 0000000000..1a31aa3bce --- /dev/null +++ b/src/armnn/backends/test/ReshapeTestImpl.hpp @@ -0,0 +1,177 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/ArmNN.hpp> +#include <armnn/Tensor.hpp> +#include <armnn/TypesUtils.hpp> +#include <backends/WorkloadInfo.hpp> + +#include "test/TensorHelpers.hpp" +#include "QuantizeHelper.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" + +template<typename T> +LayerTestResult<T, 4> SimpleReshapeTestImpl( + armnn::IWorkloadFactory& workloadFactory, + armnn::TensorInfo inputTensorInfo, + armnn::TensorInfo outputTensorInfo, + const std::vector<T>& inputData, + const std::vector<T>& outputExpectedData) +{ + auto input = MakeTensor<T, 4>(inputTensorInfo, inputData); + + LayerTestResult<T, 4> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ReshapeQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReshape(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +LayerTestResult<float, 4> SimpleReshapeFloat32Test(armnn::IWorkloadFactory& workloadFactory) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { 2, 2, 3, 3 }; + unsigned int outputShape[] = { 2, 2, 9, 1 }; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + std::vector<float> input = std::vector<float>( + { + 0.0f, 1.0f, 2.0f, + 3.0f, 4.0f, 5.0f, + 6.0f, 7.0f, 8.0f, + + 9.0f, 10.0f, 11.0f, + 12.0f, 13.0f, 14.0f, + 15.0f, 16.0f, 17.0f, + + 18.0f, 19.0f, 20.0f, + 21.0f, 22.0f, 23.0f, + 24.0f, 25.0f, 26.0f, + + 27.0f, 28.0f, 29.0f, + 30.0f, 31.0f, 
32.0f, + 33.0f, 34.0f, 35.0f, + }); + + std::vector<float> outputExpected = std::vector<float>( + { + 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, + + 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, + + 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, + + 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, + }); + + return SimpleReshapeTestImpl<float>(workloadFactory, inputTensorInfo, outputTensorInfo, input, outputExpected); +} + +LayerTestResult<float, 4> SimpleFloorTest(armnn::IWorkloadFactory& workloadFactory) +{ + const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo(inputTensorInfo); + + auto input = MakeTensor<float, 4>(inputTensorInfo, + { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, + 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f }); + + LayerTestResult<float, 4> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, + { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f, + 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f }); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::FloorQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get()); + + return ret; +} + +LayerTestResult<uint8_t, 4> SimpleReshapeUint8Test(armnn::IWorkloadFactory& workloadFactory) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { 2, 2, 3, 3 }; + unsigned int outputShape[] = { 2, 2, 9, 1 }; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::QuantisedAsymm8); + inputTensorInfo.SetQuantizationScale(1.0f); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationScale(1.0f); + + std::vector<uint8_t> input = std::vector<uint8_t>( + { + 0, 1, 2, + 3, 4, 5, + 6, 7, 8, + + 9, 10, 11, + 12, 13, 14, + 15, 16, 17, + + 18, 19, 20, + 21, 22, 23, + 24, 25, 26, + + 27, 28, 29, + 30, 31, 32, + 33, 34, 35, + }); + + std::vector<uint8_t> outputExpected = std::vector<uint8_t>( + { + 0, 1, 2, 3, 4, 5, 6, 7, 8, + + 9, 10, 11, 12, 13, 14, 15, 16, 17, + + 18, 19, 20, 21, 22, 23, 24, 25, 26, + + 27, 28, 29, 30, 31, 32, 33, 34, 35, + }); + + return SimpleReshapeTestImpl<uint8_t>(workloadFactory, inputTensorInfo, outputTensorInfo, input, outputExpected); +} diff --git a/src/armnn/backends/test/SoftmaxTestImpl.hpp b/src/armnn/backends/test/SoftmaxTestImpl.hpp new file mode 100644 index 0000000000..5aa74f9618 --- /dev/null +++ b/src/armnn/backends/test/SoftmaxTestImpl.hpp @@ -0,0 +1,150 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include <armnn/ArmNN.hpp> +#include <armnn/Tensor.hpp> +#include <armnn/TypesUtils.hpp> +#include <backends/WorkloadInfo.hpp> + +#include "test/TensorHelpers.hpp" +#include "QuantizeHelper.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" + +#include <algorithm> + +template<typename T> +LayerTestResult<T, 2> SimpleSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory, float beta) +{ + using std::exp; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { 2, 4 }; + + inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>()); + float qScale = 1.f / 256.f; + int qOffset = 0; + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + + outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>()); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + + LayerTestResult<T, 2> ret(outputTensorInfo); + + // Each row is independently softmax'd + auto input = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>( + QuantizedVector<T>(qScale, 0, { + 0.f, 1.f, 0.f, 0.f, + .5f, 0.f, 0.f, 0.f, + }))); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::SoftmaxQueueDescriptor data; + data.m_Parameters.m_Beta = beta; + + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + + float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta), + exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) }; + float sum0 = x0[0] + x0[1] + x0[2] + x0[3]; + float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta), + exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) }; + float sum1 = x1[0] + x1[1] + x1[2] + x1[3]; + + ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0, + x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1 + }))); + + return ret; +} + +template<typename T> +LayerTestResult<T, 2> CompareSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory, + armnn::IWorkloadFactory& refWorkloadFactory, + float beta) +{ + + const int batchSize = 20; + const int channels = 30; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { batchSize, channels }; + + inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>()); + outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>()); + float qScale = 1.f / 256.f; + int qOffset = 0; + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + + + LayerTestResult<T, 2> ret(outputTensorInfo); + auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f); + + 
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::SoftmaxQueueDescriptor data; + data.m_Parameters.m_Beta = beta; + + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo); + + + armnn::SoftmaxQueueDescriptor refData = data; + armnn::WorkloadInfo refInfo = info; + SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get()); + SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get()); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info); + std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo); + + outputHandleRef->Allocate(); + inputHandleRef->Allocate(); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0]); + CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]); + + workload->Execute(); + workloadRef->Execute(); + + CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get()); + CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get()); + + return ret; +}
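For orientation: the expected outputs in SimpleSoftmaxTestImpl above are written out by hand using the numerically stable form of softmax, that is, the row maximum is subtracted before exponentiating (1.0f for the first row, 0.5f for the second), which does not change the result but keeps exp() in a safe range. Below is a minimal standalone sketch of that reference computation, independent of the Arm NN test scaffolding; the function name and types are illustrative only and not part of the patch.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Reference softmax over a single row with a beta (temperature) parameter.
    // Subtracting the row maximum before calling exp() mirrors how the expected
    // values of SimpleSoftmaxTestImpl are computed above.
    std::vector<float> SoftmaxRow(const std::vector<float>& row, float beta)
    {
        const float rowMax = *std::max_element(row.begin(), row.end());

        std::vector<float> result(row.size());
        float sum = 0.0f;
        for (std::size_t i = 0; i < row.size(); ++i)
        {
            result[i] = std::exp((row[i] - rowMax) * beta);
            sum += result[i];
        }
        for (float& value : result)
        {
            value /= sum; // normalise so the row sums to 1
        }
        return result;
    }

For beta = 1 and the first input row {0, 1, 0, 0} this reproduces the x0/sum0 values used in the expected tensor.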
\ No newline at end of file diff --git a/src/armnn/backends/test/SplitterTestImpl.hpp b/src/armnn/backends/test/SplitterTestImpl.hpp new file mode 100644 index 0000000000..b72046e4bc --- /dev/null +++ b/src/armnn/backends/test/SplitterTestImpl.hpp @@ -0,0 +1,328 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/ArmNN.hpp> +#include <armnn/Tensor.hpp> +#include <backends/WorkloadInfo.hpp> + +#include "test/TensorHelpers.hpp" + +#include "backends/CpuTensorHandle.hpp" +#include "backends/WorkloadFactory.hpp" + +#include "backends/test/QuantizeHelper.hpp" + + +template<typename T> +std::vector<LayerTestResult<T,3>> SplitterTestCommon(armnn::IWorkloadFactory& workloadFactory, + float qScale = 0.0f, + int32_t qOffset = 0) +{ + unsigned int inputWidth = 5; + unsigned int inputHeight = 6; + unsigned int inputChannels = 3; + + unsigned int outputWidth1 = 2; + unsigned int outputHeight1 = 2; + unsigned int outputChannels1 = 3; + + unsigned int outputWidth2 = 2; + unsigned int outputHeight2 = 4; + unsigned int outputChannels2 = 3; + + unsigned int outputWidth3 = 3; + unsigned int outputHeight3 = 6; + unsigned int outputChannels3 = 2; + + unsigned int outputWidth4 = 3; + unsigned int outputHeight4 = 6; + unsigned int outputChannels4 = 1; + + + // Define the tensor descriptors + armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo3({ outputChannels3, outputHeight3, outputWidth3 }, armnn::GetDataType<T>()); + armnn::TensorInfo outputTensorInfo4({ outputChannels4, outputHeight4, outputWidth4 }, armnn::GetDataType<T>()); + // note that output 5 should match output 2 + armnn::TensorInfo outputTensorInfo5({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>()); + + // Set quantization parameters if the requested type is a quantized type. 
+ // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize + if(armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo1.SetQuantizationScale(qScale); + outputTensorInfo1.SetQuantizationOffset(qOffset); + outputTensorInfo2.SetQuantizationScale(qScale); + outputTensorInfo2.SetQuantizationOffset(qOffset); + outputTensorInfo3.SetQuantizationScale(qScale); + outputTensorInfo3.SetQuantizationOffset(qOffset); + outputTensorInfo4.SetQuantizationScale(qScale); + outputTensorInfo4.SetQuantizationOffset(qOffset); + outputTensorInfo5.SetQuantizationScale(qScale); + outputTensorInfo5.SetQuantizationOffset(qOffset); + } + + LayerTestResult<T,3> ret1(outputTensorInfo1); + LayerTestResult<T,3> ret2(outputTensorInfo2); + LayerTestResult<T,3> ret3(outputTensorInfo3); + LayerTestResult<T,3> ret4(outputTensorInfo4); + LayerTestResult<T,3> ret5(outputTensorInfo5); + + auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, + 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, + 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, + 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, + 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, + + 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, + 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, + 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, + 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, + 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, + 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, + + 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, + 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, + 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, + 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, + 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, + 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, + }) + )); + + + ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 1.0f, 2.0f, + 6.0f, 7.0f, + + 31.0f, 32.0f, + 36.0f, 37.0f, + + 61.0f, 62.0f, + 66.0f, 67.0f, + }) + )); + + ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 11.0f, 12.0f, + 16.0f, 17.0f, + 21.0f, 22.0f, + 26.0f, 27.0f, + + 41.0f, 42.0f, + 46.0f, 47.0f, + 51.0f, 52.0f, + 56.0f, 57.0f, + + 71.0f, 72.0f, + 76.0f, 77.0f, + 81.0f, 82.0f, + 86.0f, 87.0f, + }) + )); + + ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 3.0f, 4.0f, 5.0f, + 8.0f, 9.0f, 10.0f, + 13.0f, 14.0f, 15.0f, + 18.0f, 19.0f, 20.0f, + 23.0f, 24.0f, 25.0f, + 28.0f, 29.0f, 30.0f, + + 33.0f, 34.0f, 35.0f, + 38.0f, 39.0f, 40.0f, + 43.0f, 44.0f, 45.0f, + 48.0f, 49.0f, 50.0f, + 53.0f, 54.0f, 55.0f, + 58.0f, 59.0f, 60.0f, + }) + )); + + ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 63.0f, 64.0f, 65.0f, + 68.0f, 69.0f, 70.0f, + 73.0f, 74.0f, 75.0f, + 78.0f, 79.0f, 80.0f, + 83.0f, 84.0f, 85.0f, + 88.0f, 89.0f, 90.0f, + }) + )); + + + ret5.outputExpected = MakeTensor<T, 3>(outputTensorInfo5, std::vector<T>( + QuantizedVector<T>(qScale, qOffset, { + 11.0f, 12.0f, + 16.0f, 17.0f, + 21.0f, 22.0f, + 26.0f, 27.0f, + + 41.0f, 42.0f, + 46.0f, 47.0f, + 51.0f, 52.0f, + 56.0f, 57.0f, + + 71.0f, 72.0f, + 76.0f, 77.0f, + 81.0f, 82.0f, + 86.0f, 87.0f, + }) + )); + + std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //extent of the window is defined by size of output[0] + armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1); + + std::vector<unsigned int> wOrigin2 = {0, 2, 0}; //extent of the 
window is defined by size of output[1] + armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2); + + std::vector<unsigned int> wOrigin3 = {0, 0, 2}; //extent of the window is defined by size of output[2] + armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3); + + std::vector<unsigned int> wOrigin4 = {2, 0, 2}; //extent of the window is defined by size of output[3] + armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4); + + bool subTensorsSupported = workloadFactory.SupportsSubTensors(); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + + std::unique_ptr<armnn::ITensorHandle> outputHandle1 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) : + workloadFactory.CreateTensorHandle(outputTensorInfo1); + + std::unique_ptr<armnn::ITensorHandle> outputHandle2 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) : + workloadFactory.CreateTensorHandle(outputTensorInfo2); + + std::unique_ptr<armnn::ITensorHandle> outputHandle3 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo3.GetShape(), wOrigin3.data()) : + workloadFactory.CreateTensorHandle(outputTensorInfo3); + + std::unique_ptr<armnn::ITensorHandle> outputHandle4 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo4.GetShape(), wOrigin4.data()) : + workloadFactory.CreateTensorHandle(outputTensorInfo4); + + std::unique_ptr<armnn::ITensorHandle> outputHandle5 = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo5.GetShape(), wOrigin2.data()) : + workloadFactory.CreateTensorHandle(outputTensorInfo5); + + armnn::SplitterQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get()); + AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get()); + AddOutputToWorkload(data, info, outputTensorInfo3, outputHandle3.get()); + AddOutputToWorkload(data, info, outputTensorInfo4, outputHandle4.get()); + AddOutputToWorkload(data, info, outputTensorInfo5, outputHandle5.get()); + + data.m_ViewOrigins.push_back(window1); + data.m_ViewOrigins.push_back(window2); + data.m_ViewOrigins.push_back(window3); + data.m_ViewOrigins.push_back(window4); + //add window2 again (to have an overlapping split) + data.m_ViewOrigins.push_back(window2); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info); + + inputHandle->Allocate(); + outputHandle1->Allocate(); + outputHandle2->Allocate(); + outputHandle3->Allocate(); + outputHandle4->Allocate(); + outputHandle5->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get()); + CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get()); + CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get()); + CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get()); + CopyDataFromITensorHandle(&ret5.output[0][0][0], outputHandle5.get()); + + std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4, ret5}; + + return ret; +} + + +template <typename T> +LayerTestResult<T, 3> CopyViaSplitterTestImpl(armnn::IWorkloadFactory& workloadFactory, float 
qScale, int32_t qOffset) +{ + const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>()); + auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset, + { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, + 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, + 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, + 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, + 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, + + 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, + 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, + 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, + 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, + 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, + 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, + + 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, + 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, + 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, + 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, + 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, + 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, + })); + + std::vector<unsigned int> origin = { 0, 0, 0 }; + armnn::SplitterQueueDescriptor::ViewOrigin window(origin); + + const bool subTensorsSupported = workloadFactory.SupportsSubTensors(); + + std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo); + + std::unique_ptr<armnn::ITensorHandle> outputHandle = + subTensorsSupported ? + workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) : + workloadFactory.CreateTensorHandle(tensorInfo); + + armnn::SplitterQueueDescriptor data; + armnn::WorkloadInfo info; + AddInputToWorkload(data, info, tensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, tensorInfo, outputHandle.get()); + + data.m_ViewOrigins.push_back(window); + + std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]); + + workload->Execute(); + + LayerTestResult<T, 3> ret(tensorInfo); + CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); + ret.outputExpected = input; + + return ret; +} diff --git a/src/armnn/backends/test/TensorCopyUtils.cpp b/src/armnn/backends/test/TensorCopyUtils.cpp new file mode 100644 index 0000000000..e15c12a76f --- /dev/null +++ b/src/armnn/backends/test/TensorCopyUtils.cpp @@ -0,0 +1,152 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+//
+
+#include <algorithm>
+#include <cstring>
+#include <boost/cast.hpp>
+
+#include "TensorCopyUtils.hpp"
+
+#ifdef ARMCOMPUTECL_ENABLED
+#include "backends/ClTensorHandle.hpp"
+#endif
+
+#if ARMCOMPUTENEON_ENABLED
+#include "backends/NeonTensorHandle.hpp"
+#endif
+
+#if ARMCOMPUTECL_ENABLED || ARMCOMPUTENEON_ENABLED
+#include "backends/ArmComputeTensorUtils.hpp"
+#endif
+
+#include "backends/CpuTensorHandle.hpp"
+
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+{
+    switch (tensorHandle->GetType())
+    {
+        case armnn::ITensorHandle::Cpu:
+        {
+            auto handle = boost::polymorphic_downcast<armnn::ScopedCpuTensorHandle*>(tensorHandle);
+            memcpy(handle->GetTensor<void>(), mem, handle->GetTensorInfo().GetNumBytes());
+            break;
+        }
+#ifdef ARMCOMPUTECL_ENABLED
+        case armnn::ITensorHandle::CL:
+        {
+            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
+            auto handle = boost::polymorphic_downcast<armnn::IClTensorHandle*>(tensorHandle);
+            handle->Map(true);
+            switch(handle->GetDataType())
+            {
+                case arm_compute::DataType::F32:
+                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
+                    break;
+                case arm_compute::DataType::QASYMM8:
+                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
+                    break;
+                default:
+                {
+                    throw armnn::UnimplementedException();
+                }
+            }
+            handle->UnMap();
+            break;
+        }
+#endif
+#if ARMCOMPUTENEON_ENABLED
+        case armnn::ITensorHandle::Neon:
+        {
+            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
+            auto handle = boost::polymorphic_downcast<armnn::INeonTensorHandle*>(tensorHandle);
+            switch (handle->GetDataType())
+            {
+                case arm_compute::DataType::F32:
+                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
+                    break;
+                case arm_compute::DataType::QASYMM8:
+                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
+                    break;
+                default:
+                {
+                    throw armnn::UnimplementedException();
+                }
+            }
+            break;
+        }
+#endif
+        default:
+        {
+            throw armnn::UnimplementedException();
+        }
+    }
+}
+
+void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle)
+{
+    switch (tensorHandle->GetType())
+    {
+        case armnn::ITensorHandle::Cpu:
+        {
+            auto handle = boost::polymorphic_downcast<const armnn::ScopedCpuTensorHandle*>(tensorHandle);
+            memcpy(mem, handle->GetTensor<void>(), handle->GetTensorInfo().GetNumBytes());
+            break;
+        }
+#ifdef ARMCOMPUTECL_ENABLED
+        case armnn::ITensorHandle::CL:
+        {
+            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
+            auto handle = boost::polymorphic_downcast<const armnn::IClTensorHandle*>(tensorHandle);
+            const_cast<armnn::IClTensorHandle*>(handle)->Map(true);
+            switch(handle->GetDataType())
+            {
+                case arm_compute::DataType::F32:
+                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
+                    break;
+                case arm_compute::DataType::QASYMM8:
+                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
+                    break;
+                default:
+                {
+                    throw armnn::UnimplementedException();
+                }
+            }
+            const_cast<armnn::IClTensorHandle*>(handle)->UnMap();
+            break;
+        }
+#endif
+#if ARMCOMPUTENEON_ENABLED
+        case armnn::ITensorHandle::Neon:
+        {
+            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
+            auto handle = boost::polymorphic_downcast<const armnn::INeonTensorHandle*>(tensorHandle);
+            switch (handle->GetDataType())
+            {
+                case arm_compute::DataType::F32:
+                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
+                    break;
+                case arm_compute::DataType::QASYMM8:
+                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
+                    break;
+                default:
+                {
+                    throw armnn::UnimplementedException();
+                }
+            }
+            break;
+        }
+#endif
+        default:
+        {
+            throw armnn::UnimplementedException();
+        }
+    }
+}
+
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+{
+    tensorHandle->Allocate();
+    CopyDataToITensorHandle(tensorHandle, mem);
+}
diff --git a/src/armnn/backends/test/TensorCopyUtils.hpp b/src/armnn/backends/test/TensorCopyUtils.hpp
new file mode 100644
index 0000000000..360eec61df
--- /dev/null
+++ b/src/armnn/backends/test/TensorCopyUtils.hpp
@@ -0,0 +1,14 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "armnn/Tensor.hpp"
+#include "backends/ITensorHandle.hpp"
+
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem);
+
+void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle);
+
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem);
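For orientation: the helpers declared above are used by every *TestImpl in this directory in the same allocate, copy-in, execute, copy-out sequence. The sketch below simply strings together calls that already appear in SimpleReshapeTestImpl and related functions, with a Reshape workload and float data chosen arbitrarily; the helper name is hypothetical, and it also assumes armnn::TensorInfo::GetNumElements() for sizing the output buffer.

    #include <memory>
    #include <vector>

    #include <armnn/ArmNN.hpp>
    #include <backends/WorkloadInfo.hpp>

    #include "backends/WorkloadFactory.hpp"
    #include "TensorCopyUtils.hpp"
    #include "WorkloadTestUtils.hpp"

    // Hypothetical helper showing the common test flow: create tensor handles from the
    // factory, describe the workload, allocate and fill the input, execute, then copy
    // the result back to host memory for comparison.
    std::vector<float> RunReshapeOnce(armnn::IWorkloadFactory& factory,
                                      const armnn::TensorInfo& inputInfo,
                                      const armnn::TensorInfo& outputInfo,
                                      const std::vector<float>& inputData)
    {
        std::unique_ptr<armnn::ITensorHandle> inputHandle  = factory.CreateTensorHandle(inputInfo);
        std::unique_ptr<armnn::ITensorHandle> outputHandle = factory.CreateTensorHandle(outputInfo);

        armnn::ReshapeQueueDescriptor descriptor;
        armnn::WorkloadInfo info;
        AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
        AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());

        std::unique_ptr<armnn::IWorkload> workload = factory.CreateReshape(descriptor, info);

        // Allocate() plus CopyDataToITensorHandle() in one call.
        AllocateAndCopyDataToITensorHandle(inputHandle.get(), inputData.data());
        outputHandle->Allocate();

        workload->Execute();

        std::vector<float> output(outputInfo.GetNumElements());
        CopyDataFromITensorHandle(output.data(), outputHandle.get());
        return output;
    }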
\ No newline at end of file diff --git a/src/armnn/backends/test/WorkloadDataValidation.cpp b/src/armnn/backends/test/WorkloadDataValidation.cpp new file mode 100644 index 0000000000..c3a9d40116 --- /dev/null +++ b/src/armnn/backends/test/WorkloadDataValidation.cpp @@ -0,0 +1,450 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> +#include <backends/CpuTensorHandle.hpp> +#include <backends/Workload.hpp> +#include <backends/RefWorkloads.hpp> +#include <backends/RefWorkloadFactory.hpp> + +#include <armnn/Exceptions.hpp> + +#include "WorkloadTestUtils.hpp" + +using namespace armnn; + +BOOST_AUTO_TEST_SUITE(WorkloadInfoValidation) + + + +BOOST_AUTO_TEST_CASE(QueueDescriptor_Validate_WrongNumOfInputsOutputs) +{ + InputQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + //invalid argument exception is expected, because no inputs and no outputs were defined + BOOST_CHECK_THROW(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(RefPooling2dFloat32Workload_Validate_WrongDimTensor) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = {2, 3, 4}; // <- invalid - input tensor has to be 4D + unsigned int outputShape[] = {2, 3, 4, 5}; + + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::DataType::Float32); + + Pooling2dQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr); + + // invalid argument exception is expected, input tensor has to be 4D + BOOST_CHECK_THROW(RefPooling2dFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(SoftmaxQueueDescriptor_Validate_WrongInputHeight) +{ + unsigned int inputHeight = 1; + unsigned int inputWidth = 1; + unsigned int inputChannels = 4; + unsigned int inputNum = 2; + + unsigned int outputChannels = inputChannels; + unsigned int outputHeight = inputHeight + 1; //makes data invalid - Softmax expects height and width to be 1 + unsigned int outputWidth = inputWidth; + unsigned int outputNum = inputNum; + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth }; + unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth }; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + SoftmaxQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr); + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + + //invalid argument exception is expected, because height != 1 + BOOST_CHECK_THROW(RefSoftmaxFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(FullyConnectedQueueDescriptor_Validate_RequiredDataMissing) +{ + unsigned int inputWidth = 1; + unsigned int inputHeight = 1; + unsigned int inputChannels = 5; + unsigned int inputNum = 2; + + unsigned int outputWidth = 1; + unsigned int outputHeight = 1; + unsigned int outputChannels = 3; + unsigned int outputNum 
= 2; + + // Define the tensor descriptors + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + armnn::TensorInfo weightsDesc; + armnn::TensorInfo biasesDesc; + + unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth }; + unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth }; + unsigned int weightsShape[] = { 1, 1, inputChannels, outputChannels }; + unsigned int biasShape[] = { 1, outputChannels, outputHeight, outputWidth }; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + weightsDesc = armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32); + biasesDesc = armnn::TensorInfo(4, biasShape, armnn::DataType::Float32); + + FullyConnectedQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + ScopedCpuTensorHandle weightTensor(weightsDesc); + ScopedCpuTensorHandle biasTensor(biasesDesc); + + AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr); + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + invalidData.m_Weight = &weightTensor; + invalidData.m_Bias = &biasTensor; + invalidData.m_Parameters.m_BiasEnabled = true; + invalidData.m_Parameters.m_TransposeWeightMatrix = false; + + + //invalid argument exception is expected, because not all required fields have been provided + //in particular inputsData[0], outputsData[0] and weightsData can not be null + BOOST_CHECK_THROW(RefFullyConnectedFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); +} + + +BOOST_AUTO_TEST_CASE(NormalizationQueueDescriptor_Validate_WrongInputHeight) +{ + constexpr unsigned int inputNum = 5; + constexpr unsigned int inputHeight = 32; + constexpr unsigned int inputWidth = 24; + constexpr unsigned int inputChannels = 3; + + constexpr unsigned int outputNum = inputNum; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputHeight = inputHeight + 1; //makes data invalid - normalization requires + //input and output to have the same dimensions + constexpr unsigned int outputWidth = inputWidth; + + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth}; + unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + + armnn::NormalizationAlgorithmMethod normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness; + armnn::NormalizationAlgorithmChannel normChannel = armnn::NormalizationAlgorithmChannel::Across; + float alpha = 1.f; + float beta = 1.f; + float kappa = 1.f; + uint32_t normSize = 5; + + NormalizationQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr); + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + invalidData.m_Parameters.m_NormChannelType = normChannel; + invalidData.m_Parameters.m_NormMethodType = normMethod; + invalidData.m_Parameters.m_NormSize = normSize; + invalidData.m_Parameters.m_Alpha = alpha; + invalidData.m_Parameters.m_Beta = beta; + invalidData.m_Parameters.m_K = kappa; + + //invalid argument exception is expected, because input height != output height + 
BOOST_CHECK_THROW(RefNormalizationFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(SplitterQueueDescriptor_Validate_WrongWindow) +{ + constexpr unsigned int inputNum = 1; + constexpr unsigned int inputHeight = 32; + constexpr unsigned int inputWidth = 24; + constexpr unsigned int inputChannels = 3; + + constexpr unsigned int outputNum = inputNum; + constexpr unsigned int outputChannels = inputChannels; + constexpr unsigned int outputHeight = 18; + constexpr unsigned int outputWidth = inputWidth; + + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth}; + unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + SplitterQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr); + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + + // invalid since it has only 3 dimensions while the input tensor is 4d + std::vector<unsigned int> wOrigin = {0, 0, 0}; + armnn::SplitterQueueDescriptor::ViewOrigin window(wOrigin); + invalidData.m_ViewOrigins.push_back(window); + + BOOST_TEST_INFO("Invalid argument exception is expected, because split window dimensionality does not " + "match input."); + BOOST_CHECK_THROW(RefSplitterFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); + + // invalid since window extends past the boundary of input tensor + std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0}; + armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3); + invalidData.m_ViewOrigins[0] = window3; + BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ outputHeight > inputHeight"); + BOOST_CHECK_THROW(RefSplitterFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); + + + std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0}; + armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4); + invalidData.m_ViewOrigins[0] = window4; + + std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2}; + armnn::SplitterQueueDescriptor::ViewOrigin window5(wOrigin4); + invalidData.m_ViewOrigins.push_back(window5); + + BOOST_TEST_INFO("Invalid exception due to number of split windows not matching number of outputs."); + BOOST_CHECK_THROW(RefSplitterFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); +} + + +BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow) +{ + constexpr unsigned int inputNum = 1; + constexpr unsigned int inputChannels = 3; + constexpr unsigned int inputHeight = 32; + constexpr unsigned int inputWidth = 24; + + constexpr unsigned int outputNum = 1; + constexpr unsigned int outputChannels = 3; + constexpr unsigned int outputHeight = 32; + constexpr unsigned int outputWidth = 24; + + + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth}; + unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth}; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + MergerQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + 
AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr); + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + + // invalid since it has only 3 dimensions while the input tensor is 4d + std::vector<unsigned int> wOrigin = {0, 0, 0}; + armnn::MergerQueueDescriptor::ViewOrigin window(wOrigin); + invalidData.m_ViewOrigins.push_back(window); + + BOOST_TEST_INFO("Invalid argument exception is expected, because merge window dimensionality does not " + "match input."); + BOOST_CHECK_THROW(RefMergerFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); + + // invalid since window extends past the boundary of output tensor + std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0}; + armnn::MergerQueueDescriptor::ViewOrigin window3(wOrigin3); + invalidData.m_ViewOrigins[0] = window3; + BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ inputHeight > outputHeight"); + BOOST_CHECK_THROW(RefMergerFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); + + + std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0}; + armnn::MergerQueueDescriptor::ViewOrigin window4(wOrigin4); + invalidData.m_ViewOrigins[0] = window4; + + std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2}; + armnn::MergerQueueDescriptor::ViewOrigin window5(wOrigin4); + invalidData.m_ViewOrigins.push_back(window5); + + BOOST_TEST_INFO("Invalid exception due to number of merge windows not matching number of inputs."); + BOOST_CHECK_THROW(RefMergerFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputNumbers) +{ + armnn::TensorInfo input1TensorInfo; + armnn::TensorInfo input2TensorInfo; + armnn::TensorInfo input3TensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int shape[] = {1, 1, 1, 1}; + + input1TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + input2TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + input3TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32); + + AdditionQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr); + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + + // too few inputs + BOOST_CHECK_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); + + AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr); + + // correct + BOOST_CHECK_NO_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo)); + + AddInputToWorkload(invalidData, invalidInfo, input3TensorInfo, nullptr); + + // too many inputs + BOOST_CHECK_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputShapes) +{ + armnn::TensorInfo input1TensorInfo; + armnn::TensorInfo input2TensorInfo; + armnn::TensorInfo outputTensorInfo; + + unsigned int shape1[] = {1, 1, 2, 1}; + unsigned int shape2[] = {1, 1, 3, 2}; + + // Incompatible shapes even with broadcasting + { + input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32); + input2TensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32); + + AdditionQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddInputToWorkload(invalidData, 
invalidInfo, input1TensorInfo, nullptr); + AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr); + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + + BOOST_CHECK_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); + } + + // Output size not compatible with input sizes + { + input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32); + input2TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32); + + AdditionQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr); + AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr); + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + + // output differs + BOOST_CHECK_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); + } +} + +BOOST_AUTO_TEST_CASE(MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch) +{ + armnn::TensorInfo input0TensorInfo; + armnn::TensorInfo input1TensorInfo; + armnn::TensorInfo outputTensorInfo; + + constexpr unsigned int input0Shape[] = { 2, 2, 4, 4 }; + constexpr std::size_t dimensionCount = std::extent<decltype(input0Shape)>::value; + + // Check dimension consistency for input tensors + for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex) + { + unsigned int input1Shape[dimensionCount]; + for (unsigned int i = 0; i < dimensionCount; ++i) + { + input1Shape[i] = input0Shape[i]; + } + + ++input1Shape[dimIndex]; + + input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32); + input1TensorInfo = armnn::TensorInfo(dimensionCount, input1Shape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32); + + MultiplicationQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr); + AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr); + + BOOST_CHECK_THROW(RefMultiplicationFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); + } + + // Check dimension consistency for input and output tensors + for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex) + { + unsigned int outputShape[dimensionCount]; + for (unsigned int i = 0; i < dimensionCount; ++i) + { + outputShape[i] = input0Shape[i]; + } + + ++outputShape[dimIndex]; + + input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32); + input1TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(dimensionCount, outputShape, armnn::DataType::Float32); + + MultiplicationQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr); + AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr); + + BOOST_CHECK_THROW(RefMultiplicationFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); + } +} + +BOOST_AUTO_TEST_CASE(ReshapeQueueDescriptor_Validate_MismatchingNumElements) +{ + armnn::TensorInfo inputTensorInfo; + armnn::TensorInfo 
outputTensorInfo; + + // The input and output shapes should have the same number of elements, but these don't + unsigned int inputShape[] = { 1, 1, 2, 3 }; + unsigned int outputShape[] = { 1, 1, 1, 2 }; + + inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32); + outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32); + + ReshapeQueueDescriptor invalidData; + WorkloadInfo invalidInfo; + + AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr); + AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr); + + // InvalidArgumentException is expected, because the number of elements don't match + BOOST_CHECK_THROW(RefReshapeFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/backends/test/WorkloadTestUtils.hpp b/src/armnn/backends/test/WorkloadTestUtils.hpp new file mode 100644 index 0000000000..bac958f57c --- /dev/null +++ b/src/armnn/backends/test/WorkloadTestUtils.hpp @@ -0,0 +1,55 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/Tensor.hpp> +#include <backends/WorkloadInfo.hpp> + +namespace armnn +{ +class ITensorHandle; +} + +template <typename QueueDescriptor> +void AddInputToWorkload(QueueDescriptor& descriptor, + armnn::WorkloadInfo& info, + const armnn::TensorInfo& tensorInfo, + armnn::ITensorHandle* tensorHandle) +{ + descriptor.m_Inputs.push_back(tensorHandle); + info.m_InputTensorInfos.push_back(tensorInfo); +} + +template <typename QueueDescriptor> +void AddOutputToWorkload(QueueDescriptor& descriptor, + armnn::WorkloadInfo& info, + const armnn::TensorInfo& tensorInfo, + armnn::ITensorHandle* tensorHandle) +{ + descriptor.m_Outputs.push_back(tensorHandle); + info.m_OutputTensorInfos.push_back(tensorInfo); +} + +template <typename QueueDescriptor> +void SetWorkloadInput(QueueDescriptor& descriptor, + armnn::WorkloadInfo& info, + unsigned int index, + const armnn::TensorInfo& tensorInfo, + armnn::ITensorHandle* tensorHandle) +{ + descriptor.m_Inputs[index] = tensorHandle; + info.m_InputTensorInfos[index] = tensorInfo; +} + +template <typename QueueDescriptor> +void SetWorkloadOutput(QueueDescriptor& descriptor, + armnn::WorkloadInfo& info, + unsigned int index, + const armnn::TensorInfo& tensorInfo, + armnn::ITensorHandle* tensorHandle) +{ + descriptor.m_Outputs[index] = tensorHandle; + info.m_OutputTensorInfos[index] = tensorInfo; +}
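One detail worth calling out: AddInputToWorkload and AddOutputToWorkload append a new slot to the descriptor and WorkloadInfo, while SetWorkloadInput and SetWorkloadOutput overwrite an existing slot. The comparison tests above (for example CompareSoftmaxTestImpl) rely on this when they copy an already populated descriptor and point the copy at the reference backend's handles. A small sketch of that redirection, wrapped in a hypothetical helper (the name and the std::pair return type are illustrative):

    #include <utility>

    #include <armnn/Tensor.hpp>
    #include <backends/WorkloadInfo.hpp>

    #include "WorkloadTestUtils.hpp"

    // Given a descriptor/info pair already filled in with AddInputToWorkload and
    // AddOutputToWorkload for the backend under test, return a copy whose slot 0
    // points at the reference backend's tensor handles instead.
    template <typename QueueDescriptor>
    std::pair<QueueDescriptor, armnn::WorkloadInfo>
    RetargetSlot0ToReference(const QueueDescriptor& descriptor,
                             const armnn::WorkloadInfo& info,
                             const armnn::TensorInfo& inputInfo,
                             const armnn::TensorInfo& outputInfo,
                             armnn::ITensorHandle* refInputHandle,
                             armnn::ITensorHandle* refOutputHandle)
    {
        QueueDescriptor refDescriptor = descriptor; // copies layer parameters, e.g. m_Beta for softmax
        armnn::WorkloadInfo refInfo = info;

        // Set* overwrites slot 0 rather than appending another input/output.
        SetWorkloadInput(refDescriptor, refInfo, 0, inputInfo, refInputHandle);
        SetWorkloadOutput(refDescriptor, refInfo, 0, outputInfo, refOutputHandle);

        return { refDescriptor, refInfo };
    }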
\ No newline at end of file diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp new file mode 100644 index 0000000000..70f78d44af --- /dev/null +++ b/src/armnn/optimizations/All.hpp @@ -0,0 +1,11 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "OptimizeInversePermutes.hpp" +#include "PermuteAsReshape.hpp" +#include "OptimizeConsecutiveReshapes.hpp" +#include "SquashEqualSiblings.hpp" +#include "MovePermuteUp.hpp" diff --git a/src/armnn/optimizations/MovePermuteUp.hpp b/src/armnn/optimizations/MovePermuteUp.hpp new file mode 100644 index 0000000000..8c59986762 --- /dev/null +++ b/src/armnn/optimizations/MovePermuteUp.hpp @@ -0,0 +1,82 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Optimization.hpp" +#include "Permute.hpp" + +namespace armnn +{ +namespace optimizations +{ +class MovePermuteUpImpl +{ +public: + /// Run for every connection between a base Layer (any) and a child PermuteLayer. If the type + /// of the base layer allows it, it moves the permutation to the inputs of the base layer. + /// I.e., adds equivalent permutations before the inputs of the base layer and moves the + /// connections in the output of the child permute layer to the output of the base layer. + void Run(Graph& graph, InputSlot& connection) const + { + OutputSlot& baseOutput = *connection.GetConnectedOutputSlot(); + + if (baseOutput.GetNumConnections() == 1U) + { + Layer& base = baseOutput.GetOwningLayer(); + + if (CanMovePermuteToInputs(base)) + { + auto permute = boost::polymorphic_downcast<PermuteLayer*>(&connection.GetOwningLayer()); + const PermutationVector& perm = permute->GetPermutation(); + + // Insert an equivalent permute before every input of the base layer. + for (auto baseInput = base.BeginInputSlots(); baseInput != base.EndInputSlots(); ++baseInput) + { + // Insert new permute layer. + const std::string name = std::string("moved_up-") + permute->GetName(); + PermuteLayer& permLayer = *graph.InsertNewLayer<PermuteLayer>(*baseInput, perm, name.c_str()); + + // Set output tensor info for the new layer. + OutputSlot& parentOutput = *permLayer.GetInputSlot(0).GetConnectedOutputSlot(); + const TensorInfo permOutInfo = armnnUtils::Permuted(parentOutput.GetTensorInfo(), perm); + permLayer.GetOutputHandler().SetTensorInfo(permOutInfo); + } + + // Set permuted output tensor info + const TensorInfo& childOutInfo = permute->GetOutputHandler().GetTensorInfo(); + base.GetOutputHandler().SetTensorInfo(childOutInfo); + + // Bypass permute. It will be removed as it's left unconnected. 
+ permute->GetOutputSlot().MoveAllConnections(base.GetOutputSlot()); + } + } + } + +protected: + MovePermuteUpImpl() = default; + ~MovePermuteUpImpl() = default; + +private: + static bool CanMovePermuteToInputs(const Layer& base) + { + switch (base.GetType()) + { + case LayerType::Activation: + case LayerType::Addition: + case LayerType::FakeQuantization: + case LayerType::Floor: + case LayerType::MemCopy: + case LayerType::Multiplication: + return true; + default: + return false; + } + } +}; + +using MovePermuteUp = OptimizeForConnection<Layer, PermuteLayer, MovePermuteUpImpl>; + +} // namespace optimizations +} // namespace armnn diff --git a/src/armnn/optimizations/Optimization.hpp b/src/armnn/optimizations/Optimization.hpp new file mode 100644 index 0000000000..89e03ff88d --- /dev/null +++ b/src/armnn/optimizations/Optimization.hpp @@ -0,0 +1,123 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Graph.hpp" +#include "LayersFwd.hpp" + +namespace armnn +{ + +class Optimization +{ +public: + virtual void Run(Graph& graph, Graph::Iterator& pos) const = 0; +protected: + ~Optimization() = default; +}; + +// Wrappers +// The implementation of the following wrappers make use of the CRTP C++ idiom +// (curiously recurring template pattern). +// For details, see https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern + +/// Wrapper Optimization base class that calls Wrapped::Run for every layer of type BaseType. +/// - Wrapped class mustn't remove the base layer. +/// - Base layer is removed if left unconnected after applying the wrapped optimization. +template <typename BaseType, typename Wrapped> +class OptimizeForTypeImpl : public armnn::Optimization, public Wrapped +{ +public: + using Wrapped::Wrapped; + + void Run(Graph& graph, Graph::Iterator& pos) const override + { + Layer* const base = *pos; + + if (base->GetType() == LayerEnumOf<BaseType>()) + { + Wrapped::Run(graph, *boost::polymorphic_downcast<BaseType*>(base)); + } + } + +protected: + ~OptimizeForTypeImpl() = default; +}; + +/// Specialization that calls Wrapped::Run for any layer type +template <typename Wrapped> +class OptimizeForTypeImpl<Layer, Wrapped> : public armnn::Optimization, public Wrapped +{ +public: + using Wrapped::Wrapped; + + void Run(Graph& graph, Graph::Iterator& pos) const override + { + Wrapped::Run(graph, **pos); + } + +protected: + ~OptimizeForTypeImpl() = default; +}; + +template <typename BaseType, typename Wrapped> +class OptimizeForType final : public OptimizeForTypeImpl<BaseType, Wrapped> +{ +public: + using OptimizeForTypeImpl<BaseType, Wrapped>::OptimizeForTypeImpl; +}; + +/// Wrapper Optimization class that calls Wrapped::Run for every connection BaseType -> ChildType. +/// - Wrapped class mustn't remove the base layer. +/// - Wrapped class mustn't affect existing connections in the same output. It might add new ones. +/// - Base and children layers are removed if left unconnected after applying the wrapped optimization. 
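+/// E.g. OptimizeForConnection<PermuteLayer, PermuteLayer, OptimizeInversePermutesImpl> (see
+/// OptimizeInversePermutes.hpp) runs the wrapped optimization once for every PermuteLayer whose input is
+/// produced directly by another PermuteLayer.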
+template <typename BaseType, typename ChildType, typename Wrapped> +class OptimizeForConnectionImpl : public Wrapped +{ +public: + using Wrapped::Wrapped; + + void Run(Graph& graph, BaseType& base) const + { + for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output) + { + for (auto&& childInput : output->GetConnections()) + { + if (childInput->GetOwningLayer().GetType() == LayerEnumOf<ChildType>()) + { + Wrapped::Run(graph, *childInput); + } + } + + // Remove unconnected children + for (unsigned int i = 0; i < output->GetNumConnections();) + { + Layer* child = &output->GetConnection(i)->GetOwningLayer(); + + if (child->IsOutputUnconnected()) + { + graph.EraseLayer(child); + } + else + { + ++i; + } + } + } + } + +protected: + ~OptimizeForConnectionImpl() = default; +}; + +template <typename BaseType, typename ChildType, typename Wrapped> +class OptimizeForConnection final + : public OptimizeForTypeImpl<BaseType, OptimizeForConnectionImpl<BaseType, ChildType, Wrapped>> +{ +public: + using OptimizeForTypeImpl<BaseType, OptimizeForConnectionImpl<BaseType, ChildType, Wrapped>>::OptimizeForTypeImpl; +}; + +} // namespace armnn diff --git a/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp new file mode 100644 index 0000000000..deb49c6884 --- /dev/null +++ b/src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp @@ -0,0 +1,60 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Optimization.hpp" + +namespace armnn +{ +namespace optimizations +{ + +class OptimizeConsecutiveReshapesImpl +{ +public: + /// Run for every connection between a base RashapeLayer and a child ReshapeLayer. + /// Inserts an equivalent ReshapeLayer that bypasses both for that connection. + void Run(Graph& graph, InputSlot& connection) const + { + auto& base = connection.GetConnectedOutputSlot()->GetOwningLayer(); + auto& child = connection.GetOwningLayer(); + + BOOST_ASSERT(base.GetType() == LayerType::Reshape); + BOOST_ASSERT(child.GetType() == LayerType::Reshape); + + OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot(); + + const TensorInfo& inInfo = parentOut->GetTensorInfo(); + const TensorInfo& outInfo = child.GetOutputHandler().GetTensorInfo(); + + if (inInfo.GetShape() != outInfo.GetShape()) + { + // Insert equivalent reshape before base layer + const std::string name = std::string("merged-") + base.GetName() + std::string("-with-") + child.GetName(); + const ReshapeDescriptor descriptor{outInfo.GetShape()}; + auto& newReshape = *graph.InsertNewLayer<ReshapeLayer>(base.GetInputSlot(0), descriptor, name.c_str()); + // Set tensor info for new layer + newReshape.GetOutputHandler().SetTensorInfo(outInfo); + // Reconnect base with original parent + newReshape.GetOutputSlot().MoveAllConnections(*parentOut); + // Parent is now the new layer + parentOut = &newReshape.GetOutputSlot(); + } + + // Move connections in child output to parent layer. + // Child layer will be removed as it's left unconnected. + // Base layer will be removed if left unconnected. 
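+ // When the overall shape is unchanged the two reshapes cancel out and consumers are reconnected straight to
+ // the original parent; otherwise they are reconnected to the single merged reshape inserted above.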
+ child.GetOutputSlot().MoveAllConnections(*parentOut); + } + +protected: + OptimizeConsecutiveReshapesImpl() = default; + ~OptimizeConsecutiveReshapesImpl() = default; +}; + +using OptimizeConsecutiveReshapes = OptimizeForConnection<ReshapeLayer, ReshapeLayer, OptimizeConsecutiveReshapesImpl>; + +} // namespace optimizations +} // namespace armnn diff --git a/src/armnn/optimizations/OptimizeInversePermutes.hpp b/src/armnn/optimizations/OptimizeInversePermutes.hpp new file mode 100644 index 0000000000..63820cb7d3 --- /dev/null +++ b/src/armnn/optimizations/OptimizeInversePermutes.hpp @@ -0,0 +1,40 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Optimization.hpp" + +namespace armnn +{ +namespace optimizations +{ + +class OptimizeInversePermutesImpl +{ +public: + /// Run for every connection between a base PermuteLayer and a child PermuteLayer. + /// Bypasses both layers for that connection if one is the inverse of the other. + void Run(Graph& graph, InputSlot& connection) const + { + Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer(); + auto child = boost::polymorphic_downcast<PermuteLayer*>(&connection.GetOwningLayer()); + + if (child->IsInverse(*boost::polymorphic_downcast<PermuteLayer*>(&base))) + { + // Bypass both layers. Child will be removed as it's left unconnected. + // Base layer will be removed if left unconnected. + child->GetOutputSlot().MoveAllConnections(*base.GetInputSlot(0).GetConnectedOutputSlot()); + } + } + +protected: + OptimizeInversePermutesImpl() = default; + ~OptimizeInversePermutesImpl() = default; +}; + +using OptimizeInversePermutes = OptimizeForConnection<PermuteLayer, PermuteLayer, OptimizeInversePermutesImpl>; + +} // namespace optimizations +} // namespace armnn diff --git a/src/armnn/optimizations/PermuteAsReshape.hpp b/src/armnn/optimizations/PermuteAsReshape.hpp new file mode 100644 index 0000000000..a8e4c2df5e --- /dev/null +++ b/src/armnn/optimizations/PermuteAsReshape.hpp @@ -0,0 +1,70 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Optimization.hpp" + +namespace armnn +{ +namespace optimizations +{ + +class PermuteAsReshapeImpl +{ +public: + /// Run for every PermuteLayer. Replaces it with a ReshapeLayer if they are equivalent. + void Run(Graph& graph, PermuteLayer& permute) const + { + if (IsReshape(permute)) + { + const TensorInfo& outInfo = permute.GetOutputHandler().GetTensorInfo(); + + const std::string name = std::string("as_reshape-") + permute.GetName(); + const ReshapeDescriptor descriptor{outInfo.GetShape()}; + // Insert so layers don't need to be re-sorted + auto reshape = graph.InsertNewLayer<ReshapeLayer>(permute.GetInputSlot(0), descriptor, name.c_str()); + reshape->GetOutputHandler().SetTensorInfo(outInfo); + + // Bypass permute. It will be deleted since it's left unconnected. 
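+ // A permute is equivalent to a reshape only when it merely repositions dimensions of size 1, i.e. the
+ // relative order of all dimensions larger than 1 is preserved; IsReshape below checks exactly that.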
+ permute.GetOutputSlot().MoveAllConnections(reshape->GetOutputSlot()); + } + } + +protected: + PermuteAsReshapeImpl() = default; + ~PermuteAsReshapeImpl() = default; + +private: + static bool IsReshape(const PermuteLayer& layer) + { + const TensorShape& outShape = layer.GetOutputHandler().GetTensorInfo().GetShape(); + const PermutationVector& permutation = layer.GetPermutation(); + + const unsigned int numDimensions = permutation.GetSize(); + + unsigned int lastGtOne = 0; + while ((lastGtOne < numDimensions) && (outShape[(permutation[lastGtOne])] == 1U)) + { + ++lastGtOne; + } + + bool isReshape = true; + for (unsigned int i = lastGtOne + 1U; isReshape && (i < numDimensions); ++i) + { + if (outShape[permutation[i]] > 1U) + { + isReshape = permutation[lastGtOne] < permutation[i]; + lastGtOne = i; + } + } + + return isReshape; + } +}; + +using PermuteAsReshape = OptimizeForType<PermuteLayer, PermuteAsReshapeImpl>; + +} // namespace optimizations +} // namespace armnn diff --git a/src/armnn/optimizations/SquashEqualSiblings.hpp b/src/armnn/optimizations/SquashEqualSiblings.hpp new file mode 100644 index 0000000000..2dfe91fdcc --- /dev/null +++ b/src/armnn/optimizations/SquashEqualSiblings.hpp @@ -0,0 +1,57 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Optimization.hpp" + +namespace armnn +{ +namespace optimizations +{ + +template <typename Comparable> +class SquashEqualSiblingsImpl +{ +public: + /// Run for every connection between a base Layer (any) and a child ComparableLayer. + /// For all siblings of the child layer that compare equal to it, bypasses and removes + /// them. I.e., moves the connections in the outputs of the siblings to the outputs of + /// the child layer, so the siblings are left unconnected (and later removed). + void Run(Graph& graph, InputSlot& connection) const + { + auto& child = connection.GetOwningLayer(); + + if (!child.IsOutputUnconnected()) + { + OutputSlot& baseOutput = *connection.GetConnectedOutputSlot(); + auto& comparableChild = *boost::polymorphic_downcast<Comparable*>(&child); + + for (auto&& it : baseOutput.GetConnections()) + { + Layer& sibling = it->GetOwningLayer(); + if ((&sibling != &child) && comparableChild.IsEqual(sibling)) + { + // Bypass sibling. It will be removed as it's left unconnected. + auto siblingOut = sibling.BeginOutputSlots(); + for (auto childOut = child.BeginOutputSlots(); childOut != child.EndOutputSlots(); ++childOut) + { + siblingOut->MoveAllConnections(*childOut); + ++siblingOut; + } + } + } + } + } + +protected: + SquashEqualSiblingsImpl() = default; + ~SquashEqualSiblingsImpl() = default; +}; + +using SquashEqualPermuteSiblings = OptimizeForConnection<Layer, PermuteLayer, SquashEqualSiblingsImpl<PermuteLayer>>; +using SquashEqualReshapeSiblings = OptimizeForConnection<Layer, ReshapeLayer, SquashEqualSiblingsImpl<ReshapeLayer>>; + +} // namespace optimizations +} // namespace armnn diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp new file mode 100644 index 0000000000..d8aa208eb7 --- /dev/null +++ b/src/armnn/test/CreateWorkload.hpp @@ -0,0 +1,814 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include <boost/test/unit_test.hpp> + +#include <boost/cast.hpp> + +#include "backends/WorkloadData.hpp" +#include "Layers.hpp" +#include "Graph.hpp" + +#include <utility> + +#include "backends/CpuTensorHandle.hpp" + +using namespace armnn; + +namespace +{ + +using namespace std; + +// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type +template<typename Workload> +std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, Graph& graph, const IWorkloadFactory& factory) +{ + std::unique_ptr<IWorkload> workload = layer.CreateWorkload(graph, factory); + BOOST_TEST(workload.get() == boost::polymorphic_downcast<Workload*>(workload.get()), + "Cannot convert to derived class"); + std::string reasonIfUnsupported; + BOOST_TEST(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported)); + return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release())); +} + +// connects two layers +void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0) +{ + from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex)); + from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo); +} + +// helper function to create tensor handlers for workloads, assuming they all use the same factory +void CreateTensorHandles(armnn::Graph& graph, armnn::IWorkloadFactory& factory) +{ + for (auto&& layer : graph.TopologicalSort()) + { + layer->CreateTensorHandles(graph, factory); + } +} + +///////////////////////////////////////////////////////////////////////////////////////////// +// The following functions are called by backends/test/CreateWorkload*.cpp +// They build very simple graphs, and then create a workload. +// Some checks are performed on the workload to ensure parameters have been passed correctly. +// They return the created workloads so that backend-specific checks can be performed. 
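+// For example, a backend test might instantiate CreateActivationWorkloadTest with its backend's activation
+// workload type and then perform checks that only make sense for that backend (such as the concrete type of
+// the input and output tensor handles).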
+///////////////////////////////////////////////////////////////////////////////////////////// + +template <typename ActivationWorkload> +std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + ActivationDescriptor layerDesc; + layerDesc.m_Function = ActivationFunction::Abs; + layerDesc.m_A = 3.5f; + layerDesc.m_B = -10.0f; + + ActivationLayer* const layer = graph.AddLayer<ActivationLayer>(layerDesc, "layer"); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + armnn::TensorInfo tensorInfo({1, 1}, ActivationWorkload::ms_DataType); + + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, graph, factory); + + ActivationQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_A == 3.5f); + BOOST_TEST(queueDescriptor.m_Parameters.m_B == -10.0f); + BOOST_TEST((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs)); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename AdditionWorkload> +std::unique_ptr<AdditionWorkload> CreateAdditionWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + Layer* const layer = graph.AddLayer<AdditionLayer>("layer"); + + // create extra layers + Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1"); + Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + armnn::TensorInfo tensorInfo({2, 3}, AdditionWorkload::ms_DataType); + Connect(input1, layer, tensorInfo, 0, 0); + Connect(input2, layer, tensorInfo, 0, 1); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<AdditionWorkload>(*layer, graph, factory); + + AdditionQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 2); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename BatchNormalizationFloat32Workload> +std::unique_ptr<BatchNormalizationFloat32Workload> CreateBatchNormalizationWorkloadTest( + armnn::IWorkloadFactory& factory, armnn::Graph& graph) +{ + // create the layer we're testing + BatchNormalizationDescriptor layerDesc; + layerDesc.m_Eps = 0.05f; + + BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer"); + + armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32); + layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo); + layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo); + layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo); + layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo); + layer->m_Mean->Allocate(); + layer->m_Variance->Allocate(); + layer->m_Beta->Allocate(); + layer->m_Gamma->Allocate(); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + 
Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32); + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<BatchNormalizationFloat32Workload>(*layer, graph, factory); + + BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType::Float32))); + BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType::Float32))); + BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType::Float32))); + BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType::Float32))); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename Convolution2dWorkload> +std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + Convolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 3; + layerDesc.m_PadRight = 3; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 2; + layerDesc.m_StrideY = 4; + layerDesc.m_BiasEnabled = true; + + Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer"); + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3}, + Convolution2dWorkload::ms_DataType)); + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle> + (TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType))); + + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + Connect(input, layer, TensorInfo({2, 3, 8, 16}, Convolution2dWorkload::ms_DataType)); + Connect(layer, output, TensorInfo({2, 2, 2, 10}, Convolution2dWorkload::ms_DataType)); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory); + + Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2); + BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true); + + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 5, 3}, + Convolution2dWorkload::ms_DataType))); + BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == + TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType)))); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename Convolution2dWorkload> +std::unique_ptr<Convolution2dWorkload> 
CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + Convolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 1; + layerDesc.m_PadRight = 1; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 1; + layerDesc.m_StrideY = 1; + layerDesc.m_BiasEnabled = true; + + Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer"); + + float inputsQScale = Convolution2dWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 1.0f : 0.0; + float outputQScale = Convolution2dWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 2.0f : 0.0; + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, + Convolution2dWorkload::ms_DataType, inputsQScale)); + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle> + (TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType), inputsQScale)); + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + Connect(input, layer, TensorInfo({2, 3, 6, 6}, Convolution2dWorkload::ms_DataType, inputsQScale)); + Connect(layer, output, TensorInfo({2, 2, 6, 6}, Convolution2dWorkload::ms_DataType, outputQScale)); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory); + + Convolution2dQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true); + + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3}, + Convolution2dWorkload::ms_DataType, inputsQScale))); + BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() + == TensorInfo({2}, GetBiasDataType(Convolution2dWorkload::ms_DataType), inputsQScale))); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename DepthwiseConvolution2dFloat32Workload> +std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolution2dWorkloadTest( + armnn::IWorkloadFactory& factory, armnn::Graph& graph) +{ + // create the layer we're testing + DepthwiseConvolution2dDescriptor layerDesc; + layerDesc.m_PadLeft = 3; + layerDesc.m_PadRight = 3; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 2; + layerDesc.m_StrideY = 4; + layerDesc.m_BiasEnabled = true; + + DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer"); + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32)); + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32)); + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + 
// connect up + Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32)); + Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32)); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, graph, factory); + + DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2); + BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true); + + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({3, 3, 5, 3}, DataType::Float32))); + BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({9}, DataType::Float32))); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename FullyConnectedWorkload> +std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + FullyConnectedDescriptor layerDesc; + layerDesc.m_BiasEnabled = true; + layerDesc.m_TransposeWeightMatrix = true; + + FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer"); + + float inputsQScale = FullyConnectedWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 1.0f : 0.0; + float outputQScale = FullyConnectedWorkload::ms_DataType == DataType::QuantisedAsymm8 ? 
2.0f : 0.0; + + layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, + FullyConnectedWorkload::ms_DataType, inputsQScale, 0)); + layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, + GetBiasDataType(FullyConnectedWorkload::ms_DataType), inputsQScale)); + layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + Connect(input, layer, TensorInfo({3, 1, 4, 5}, FullyConnectedWorkload::ms_DataType, inputsQScale)); + Connect(layer, output, TensorInfo({3, 7}, FullyConnectedWorkload::ms_DataType, outputQScale)); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, graph, factory); + + FullyConnectedQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true); + BOOST_TEST(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true); + + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == + TensorInfo({7, 20}, FullyConnectedWorkload::ms_DataType, inputsQScale))); + BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == + TensorInfo({7}, GetBiasDataType(FullyConnectedWorkload::ms_DataType), inputsQScale))); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename MultiplicationWorkload> +std::unique_ptr<MultiplicationWorkload> CreateMultiplicationWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + Layer* const layer = graph.AddLayer<MultiplicationLayer>("layer"); + + // create extra layers + Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1"); + Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + armnn::TensorInfo tensorInfo({2, 3}, MultiplicationWorkload::ms_DataType); + Connect(input1, layer, tensorInfo, 0, 0); + Connect(input2, layer, tensorInfo, 0, 1); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<MultiplicationWorkload>(*layer, graph, factory); + + MultiplicationQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 2); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename NormalizationFloat32Workload> +std::unique_ptr<NormalizationFloat32Workload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + NormalizationDescriptor layerDesc; + layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across; + layerDesc.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness; + layerDesc.m_NormSize = 3; + layerDesc.m_Alpha = 0.5f; + layerDesc.m_Beta = -1.0f; + layerDesc.m_K = 0.2f; + + NormalizationLayer* layer = graph.AddLayer<NormalizationLayer>(layerDesc, "layer"); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + Connect(input, layer, TensorInfo({3, 5, 
5, 1}, armnn::DataType::Float32)); + Connect(layer, output, TensorInfo({3, 5, 5, 1}, armnn::DataType::Float32)); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<NormalizationFloat32Workload>(*layer, graph, factory); + + NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across)); + BOOST_TEST((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness)); + BOOST_TEST(queueDescriptor.m_Parameters.m_NormSize == 3); + BOOST_TEST(queueDescriptor.m_Parameters.m_Alpha == 0.5f); + BOOST_TEST(queueDescriptor.m_Parameters.m_Beta == -1.0f); + BOOST_TEST(queueDescriptor.m_Parameters.m_K == 0.2f); + + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename Pooling2dWorkload> +std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + Pooling2dDescriptor layerDesc; + layerDesc.m_PoolType = PoolingAlgorithm::Average; + layerDesc.m_PoolWidth = 3; + layerDesc.m_PoolHeight = 3; + layerDesc.m_PadLeft = 2; + layerDesc.m_PadRight = 2; + layerDesc.m_PadTop = 1; + layerDesc.m_PadBottom = 1; + layerDesc.m_StrideX = 2; + layerDesc.m_StrideY = 3; + layerDesc.m_OutputShapeRounding = OutputShapeRounding::Floor; + + Pooling2dLayer* const layer = graph.AddLayer<Pooling2dLayer>(layerDesc, "layer"); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + Connect(input, layer, TensorInfo({3, 2, 5, 5}, Pooling2dWorkload::ms_DataType)); + Connect(layer, output, TensorInfo({3, 2, 2, 4}, Pooling2dWorkload::ms_DataType)); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, graph, factory); + + Pooling2dQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average)); + BOOST_TEST((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor)); + BOOST_TEST(queueDescriptor.m_Parameters.m_PoolWidth == 3); + BOOST_TEST(queueDescriptor.m_Parameters.m_PoolHeight == 3); + BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2); + BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 3); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 2); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 2); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1); + BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1); + + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename SoftmaxWorkload> +std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + SoftmaxDescriptor softmaxDescriptor; + Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer"); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // 
connect up + armnn::TensorInfo tensorInfo({4, 1}, SoftmaxWorkload::ms_DataType); + Connect(input, layer, tensorInfo); + Connect(layer, output, tensorInfo); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, graph, factory); + + SoftmaxQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + + // return so we can do extra, backend-specific tests + return workload; +} + +template<typename SplitterWorkload> +std::unique_ptr<SplitterWorkload> + CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) +{ + // create the layer we're testing + ViewsDescriptor layerDesc(3, 2); + layerDesc.SetViewOriginCoord(0, 1, 2); // deliberately add these in a weird order + layerDesc.SetViewOriginCoord(2, 1, 0); + layerDesc.SetViewOriginCoord(1, 1, 3); + + Layer* const layer = graph.AddLayer<SplitterLayer>(layerDesc, "layer"); + + // add extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output0 = graph.AddLayer<OutputLayer>(0, "output0"); + Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1"); + Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2"); + + // connect up + armnn::TensorInfo tensorInfo({1, 7}, SplitterWorkload::ms_DataType); + Connect(input, layer, tensorInfo); + + armnn::TensorInfo output0Info({1, 2}, SplitterWorkload::ms_DataType); + armnn::TensorInfo output1Info({1, 1}, SplitterWorkload::ms_DataType); + armnn::TensorInfo output2Info({1, 4}, SplitterWorkload::ms_DataType); + Connect(layer, output1, output1Info, 1, 0); // deliberately connect these up in a weird order + Connect(layer, output0, output0Info, 2, 0); + Connect(layer, output2, output2Info, 0, 0); + + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, graph, factory); + + SplitterQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 3); + BOOST_TEST(queueDescriptor.m_ViewOrigins.size() == 3); + + BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0); + BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 0); + BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 0); + BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 2); + BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 3); + BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0); + + // return so we can do extra, backend-specific tests + return workload; +} + +/// This function constructs a graph with both a splitter and a merger, and returns a pair of the workloads +template<typename SplitterWorkload, typename MergerWorkload> +std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<MergerWorkload>> + CreateSplitterMergerWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph) +{ + static_assert(SplitterWorkload::ms_DataType == MergerWorkload::ms_DataType, + "Splitter and merger workloads must have the same data type"); + + armnn::TensorInfo inputTensorInfo({ 1, 1, 100, 10 }, SplitterWorkload::ms_DataType); + armnn::TensorInfo splitTensorInfo1({ 1, 1, 60, 10 }, SplitterWorkload::ms_DataType); + armnn::TensorInfo splitTensorInfo2({ 1, 1, 40, 10 }, SplitterWorkload::ms_DataType); + + //construct the graph + Layer* const input = graph.AddLayer<InputLayer>(0, 
"input"); + + armnn::ViewsDescriptor splitterViews(2); + splitterViews.SetViewOriginCoord(0, 0, 0); + splitterViews.SetViewOriginCoord(0, 1, 0); + splitterViews.SetViewOriginCoord(0, 2, 0); + splitterViews.SetViewOriginCoord(0, 3, 0); + + splitterViews.SetViewOriginCoord(1, 0, 0); + splitterViews.SetViewOriginCoord(1, 1, 0); + splitterViews.SetViewOriginCoord(1, 2, 60); + splitterViews.SetViewOriginCoord(1, 3, 0); + + Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter"); + + armnn::OriginsDescriptor mergerViews(2); + mergerViews.SetViewOriginCoord(0, 0, 0); + mergerViews.SetViewOriginCoord(0, 1, 0); + mergerViews.SetViewOriginCoord(0, 2, 0); + mergerViews.SetViewOriginCoord(0, 3, 0); + + mergerViews.SetViewOriginCoord(1, 0, 0); + mergerViews.SetViewOriginCoord(1, 1, 0); + mergerViews.SetViewOriginCoord(1, 2, 40); + mergerViews.SetViewOriginCoord(1, 3, 0); + + Layer* const merger = graph.AddLayer<MergerLayer>(mergerViews, "merger"); + + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // add connections + Connect(input, splitter, inputTensorInfo, 0, 0); + Connect(splitter, merger, splitTensorInfo1, 0, 1); // The splitter & merger are connected up + Connect(splitter, merger, splitTensorInfo2, 1, 0); // so that the outputs are flipped round + Connect(merger, output, inputTensorInfo, 0, 0); + + CreateTensorHandles(graph, factory); + + auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory); + auto workloadMerger = MakeAndCheckWorkload<MergerWorkload>(*merger, graph, factory); + + return {std::move(workloadSplitter), std::move(workloadMerger)}; +} + + +/// This function constructs a graph with a splitter with two outputs. Each of the outputs is then +/// connected to two different activation layers +template<typename SplitterWorkload, typename ActivationWorkload> +void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph, + std::unique_ptr<SplitterWorkload>& wlSplitter, + std::unique_ptr<ActivationWorkload>& wlActiv0_0, + std::unique_ptr<ActivationWorkload>& wlActiv0_1, + std::unique_ptr<ActivationWorkload>& wlActiv1_0, + std::unique_ptr<ActivationWorkload>& wlActiv1_1) +{ + static_assert(SplitterWorkload::ms_DataType == ActivationWorkload::ms_DataType, + "Splitter and activation workloads must have the same data type"); + + armnn::TensorInfo inputTensorInfo({ 1, 1, 100, 10 }, SplitterWorkload::ms_DataType); + armnn::TensorInfo splitTensorInfo1({ 1, 1, 60, 10 }, SplitterWorkload::ms_DataType); + armnn::TensorInfo splitTensorInfo2({ 1, 1, 40, 10 }, SplitterWorkload::ms_DataType); + + //construct the graph + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + + armnn::ViewsDescriptor splitterViews(2); + splitterViews.SetViewOriginCoord(0, 0, 0); + splitterViews.SetViewOriginCoord(0, 1, 0); + splitterViews.SetViewOriginCoord(0, 2, 0); + splitterViews.SetViewOriginCoord(0, 3, 0); + + splitterViews.SetViewOriginCoord(1, 0, 0); + splitterViews.SetViewOriginCoord(1, 1, 0); + splitterViews.SetViewOriginCoord(1, 2, 60); + splitterViews.SetViewOriginCoord(1, 3, 0); + + Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter"); + + armnn::ActivationDescriptor activationDesc; + + Layer* const activ0_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_0"); + Layer* const activ0_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_1"); + Layer* const activ1_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_0"); + Layer* 
const activ1_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_1"); + + Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1"); + Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2"); + Layer* const output3 = graph.AddLayer<OutputLayer>(3, "output3"); + Layer* const output4 = graph.AddLayer<OutputLayer>(4, "output4"); + + // add connections + Connect(input, splitter, inputTensorInfo, 0, 0); + Connect(splitter, activ0_0, splitTensorInfo1, 0, 0); + Connect(splitter, activ0_1, splitTensorInfo1, 0, 0); + + Connect(splitter, activ1_0, splitTensorInfo2, 1, 0); + Connect(splitter, activ1_1, splitTensorInfo2, 1, 0); + + Connect(activ0_0, output1, splitTensorInfo1, 0, 0); + Connect(activ0_1, output2, splitTensorInfo1, 0, 0); + Connect(activ1_0, output3, splitTensorInfo2, 0, 0); + Connect(activ1_1, output4, splitTensorInfo2, 0, 0); + + CreateTensorHandles(graph, factory); + + auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory); + auto workloadActiv0_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_0, graph, factory); + auto workloadActiv0_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_1, graph, factory); + auto workloadActiv1_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_0, graph, factory); + auto workloadActiv1_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_1, graph, factory); + + wlSplitter = std::move(workloadSplitter); + wlActiv0_0 = std::move(workloadActiv0_0); + wlActiv0_1 = std::move(workloadActiv0_1); + wlActiv1_0 = std::move(workloadActiv1_0); + wlActiv1_1 = std::move(workloadActiv1_1); +} + +template <typename ResizeBilinearWorkload> +std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + TensorShape outputShape({ 2, 3, 2, 2 }); + ResizeBilinearDescriptor resizeDesc; + resizeDesc.m_TargetWidth = outputShape[3]; + resizeDesc.m_TargetHeight = outputShape[2]; + Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer"); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + armnn::TensorInfo inputTensorInfo({ 2, 3, 4, 4 }, ResizeBilinearWorkload::ms_DataType); + armnn::TensorInfo outputTensorInfo(outputShape, ResizeBilinearWorkload::ms_DataType); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<ResizeBilinearWorkload>(*layer, graph, factory); + + ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename L2NormalizationWorkload> +std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + Layer* const layer = graph.AddLayer<L2NormalizationLayer>("l2norm"); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + armnn::TensorInfo inputTensorInfo({ 5, 20, 50, 67 }, L2NormalizationWorkload::ms_DataType); + armnn::TensorInfo outputTensorInfo({ 5, 20, 50, 67 }, 
L2NormalizationWorkload::ms_DataType); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, graph, factory); + + L2NormalizationQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + + // return so we can do extra, backend-specific tests + return workload; +} + +template <typename ReshapeWorkload> +std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph) +{ + // create the layer we're testing + TensorShape outputShape({ 1, 4 }); + ReshapeDescriptor reshapeDesc; + reshapeDesc.m_TargetShape = outputShape; + Layer* const layer = graph.AddLayer<ReshapeLayer>(reshapeDesc, "layer"); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + armnn::TensorInfo inputTensorInfo({ 4, 1 }, ReshapeWorkload::ms_DataType); + armnn::TensorInfo outputTensorInfo(outputShape, ReshapeWorkload::ms_DataType); + Connect(input, layer, inputTensorInfo); + Connect(layer, output, outputTensorInfo); + CreateTensorHandles(graph, factory); + + // make the workload and check it + auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, graph, factory); + + ReshapeQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + + // return so we can do extra, backend-specific tests + return workload; +} + +} diff --git a/src/armnn/test/CreateWorkloadClNeon.hpp b/src/armnn/test/CreateWorkloadClNeon.hpp new file mode 100644 index 0000000000..a41a70755f --- /dev/null +++ b/src/armnn/test/CreateWorkloadClNeon.hpp @@ -0,0 +1,107 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include "CreateWorkload.hpp" +#include "backends/RefWorkloadFactory.hpp" + +#if ARMCOMPUTECL_ENABLED +#include "backends/ClTensorHandle.hpp" +#endif + +#if ARMCOMPUTENEON_ENABLED +#include "backends/NeonTensorHandle.hpp" +#endif + + +using namespace armnn; + +namespace +{ + +using namespace std; + +template<typename IComputeTensorHandle> +boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle, + std::initializer_list<unsigned int> expectedDimensions) +{ + arm_compute::ITensorInfo* info = tensorHandle->GetTensor().info(); + + auto infoNumDims = info->num_dimensions(); + auto numExpectedDims = expectedDimensions.size(); + if (infoNumDims != numExpectedDims) + { + boost::test_tools::predicate_result res(false); + res.message() << "Different number of dimensions [" << info->num_dimensions() + << "!=" << expectedDimensions.size() << "]"; + return res; + } + + size_t i = info->num_dimensions() - 1; + + for (unsigned int expectedDimension : expectedDimensions) + { + if (info->dimension(i) != expectedDimension) + { + boost::test_tools::predicate_result res(false); + res.message() << "Different dimension [" << info->dimension(i) << "!=" << expectedDimension << "]"; + return res; + } + + i--; + } + + return true; +} + +template<template <DataType> class CopyFromCpuWorkload, template <DataType> class CopyToCpuWorkload, + typename IComputeTensorHandle> +void CreateMemCopyWorkloads(IWorkloadFactory& factory) +{ + Graph graph; + RefWorkloadFactory refFactory; + + // create the layers we're testing + Layer* const layer1 = graph.AddLayer<MemCopyLayer>("layer1"); + Layer* const layer2 = graph.AddLayer<MemCopyLayer>("layer2"); + + // create extra layers + Layer* const input = graph.AddLayer<InputLayer>(0, "input"); + Layer* const output = graph.AddLayer<OutputLayer>(0, "output"); + + // connect up + TensorInfo tensorInfo({2, 3}, DataType::Float32); + Connect(input, layer1, tensorInfo); + Connect(layer1, layer2, tensorInfo); + Connect(layer2, output, tensorInfo); + + input->CreateTensorHandles(graph, refFactory); + layer1->CreateTensorHandles(graph, factory); + layer2->CreateTensorHandles(graph, refFactory); + output->CreateTensorHandles(graph, refFactory); + + // make the workloads and check them + auto workload1 = MakeAndCheckWorkload<CopyFromCpuWorkload<DataType::Float32>>(*layer1, graph, factory); + auto workload2 = MakeAndCheckWorkload<CopyToCpuWorkload<DataType::Float32>>(*layer2, graph, refFactory); + + MemCopyQueueDescriptor queueDescriptor1 = workload1->GetData(); + BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1); + auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor1.m_Inputs[0]); + auto outputHandle1 = boost::polymorphic_downcast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]); + BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32))); + BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3})); + + + MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData(); + BOOST_TEST(queueDescriptor2.m_Inputs.size() == 1); + BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1); + auto inputHandle2 = boost::polymorphic_downcast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]); + auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor2.m_Outputs[0]); + BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3})); + 
BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32))); +} + +}
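CreateMemCopyWorkloads is written so that a backend test only has to supply its two copy-workload class templates and its tensor handle interface. A sketch of how a CL test could instantiate it; CopyFromCpuToClWorkload, CopyFromClToCpuWorkload, IClTensorHandle and the default-constructed ClWorkloadFactory are assumptions standing in for whatever the backend actually provides, not part of this change:

    #if ARMCOMPUTECL_ENABLED
    BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
    {
        // Template parameters: CPU-to-device workload, device-to-CPU workload, device tensor handle interface.
        ClWorkloadFactory factory;
        CreateMemCopyWorkloads<CopyFromCpuToClWorkload, CopyFromClToCpuWorkload, IClTensorHandle>(factory);
    }
    #endif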
\ No newline at end of file diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp new file mode 100644 index 0000000000..77a1f071a8 --- /dev/null +++ b/src/armnn/test/EndToEndTest.cpp @@ -0,0 +1,411 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> + +#include "armnn/Descriptors.hpp" +#include "armnn/IRuntime.hpp" +#include "armnn/INetwork.hpp" + +#include "backends/test/QuantizeHelper.hpp" +#include <boost/core/ignore_unused.hpp> + +BOOST_AUTO_TEST_SUITE(EndToEnd) + +namespace +{ +template<typename T> +bool IsFloatIterFunc(T iter) +{ + boost::ignore_unused(iter); + return IsFloatingPointIterator<T>::value; +} +} //namespace + +BOOST_AUTO_TEST_CASE(QuantizedHelper) +{ + std::vector<float> fArray; + BOOST_TEST(IsFloatIterFunc(fArray.begin()) == true); + BOOST_TEST(IsFloatIterFunc(fArray.cbegin()) == true); + + std::vector<double> dArray; + BOOST_TEST(IsFloatIterFunc(dArray.begin()) == true); + + std::vector<int> iArray; + BOOST_TEST(IsFloatIterFunc(iArray.begin()) == false); + + float floats[5]; + BOOST_TEST(IsFloatIterFunc(&floats[0]) == true); + + int ints[5]; + BOOST_TEST(IsFloatIterFunc(&ints[0]) == false); +} + +BOOST_AUTO_TEST_CASE(Unsigned8) +{ + using namespace armnn; + + // Create runtime in which test will run + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(armnn::Compute::CpuRef)); + + // build up the structure of the network + armnn::INetworkPtr net(INetwork::Create()); + + IConnectableLayer* input = net->AddInputLayer(0, "input"); + IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax"); + IConnectableLayer* output = net->AddOutputLayer(0, "output"); + + input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0)); + softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + // set the tensors in the network + TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8); + inputTensorInfo.SetQuantizationOffset(100); + inputTensorInfo.SetQuantizationScale(10000.0f); + input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo); + + TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8); + outputTensorInfo.SetQuantizationOffset(0); + outputTensorInfo.SetQuantizationScale(1.0f/255.0f); + softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + // optimize the network + IOptimizedNetworkPtr optNet = Optimize(*net, runtime->GetDeviceSpec()); + + // load it into the runtime + NetworkId netId; + runtime->LoadNetwork(netId, std::move(optNet)); + + // create structures for input & output + std::vector<uint8_t> inputData + { + 1, 10, 3, 200, 5 // some inputs - one of which is sufficiently larger than the others to saturate softmax + }; + std::vector<uint8_t> outputData(5); + + armnn::InputTensors inputTensors + { + {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())} + }; + armnn::OutputTensors outputTensors + { + {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())} + }; + + // do the inference + runtime->EnqueueWorkload(netId, inputTensors, outputTensors); + + // check the results + BOOST_TEST(outputData[0] == 0); + BOOST_TEST(outputData[1] == 0); + BOOST_TEST(outputData[2] == 0); + BOOST_TEST(outputData[3] == 255); // softmax has been saturated + BOOST_TEST(outputData[4] == 0); +} + +template <typename T> +void ConstantUsageTest(armnn::Compute computeDevice, + const armnn::TensorInfo& commonTensorInfo, + const 
std::vector<T>& inputData, + const std::vector<T>& constantData, + const std::vector<T>& expectedOutputData) +{ + using namespace armnn; + + // Create runtime in which test will run + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(computeDevice)); + + // build up the structure of the network + INetworkPtr net(INetwork::Create()); + + IConnectableLayer* input = net->AddInputLayer(0); + IConnectableLayer* constant = net->AddConstantLayer(ConstTensor(commonTensorInfo, constantData)); + IConnectableLayer* add = net->AddAdditionLayer(); + IConnectableLayer* output = net->AddOutputLayer(0); + + input->GetOutputSlot(0).Connect(add->GetInputSlot(0)); + constant->GetOutputSlot(0).Connect(add->GetInputSlot(1)); + add->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + // set the tensors in the network + input->GetOutputSlot(0).SetTensorInfo(commonTensorInfo); + constant->GetOutputSlot(0).SetTensorInfo(commonTensorInfo); + add->GetOutputSlot(0).SetTensorInfo(commonTensorInfo); + + // optimize the network + IOptimizedNetworkPtr optNet = Optimize(*net, runtime->GetDeviceSpec()); + + // load it into the runtime + NetworkId netId; + runtime->LoadNetwork(netId, std::move(optNet)); + + // create structures for input & output + std::vector<T> outputData(inputData.size()); + + InputTensors inputTensors + { + {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())} + }; + OutputTensors outputTensors + { + {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())} + }; + + // do the inference + runtime->EnqueueWorkload(netId, inputTensors, outputTensors); + + // check the results + BOOST_TEST(outputData == expectedOutputData); +} + +static void ConstantUsageFloat32Test(armnn::Compute computeDevice) +{ + const armnn::TensorInfo commonTensorInfo({ 2, 3 }, armnn::DataType::Float32); + + ConstantUsageTest(computeDevice, + commonTensorInfo, + std::vector<float>{ 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }, // input + std::vector<float>{ 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }, // const input + std::vector<float>{ 7.f, 7.f, 7.f, 7.f, 7.f, 7.f } // expected output + ); +} + +static void ConstantUsageUint8Test(armnn::Compute computeDevice) +{ + armnn::TensorInfo commonTensorInfo({ 2, 3 }, armnn::DataType::QuantisedAsymm8); + + const float scale = 0.023529f; + const int8_t offset = -43; + + commonTensorInfo.SetQuantizationScale(scale); + commonTensorInfo.SetQuantizationOffset(offset); + + ConstantUsageTest(computeDevice, + commonTensorInfo, + QuantizedVector<uint8_t>(scale, offset, { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }), // input + QuantizedVector<uint8_t>(scale, offset, { 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }), // const input + QuantizedVector<uint8_t>(scale, offset, { 7.f, 7.f, 7.f, 7.f, 7.f, 7.f }) // expected output + ); +} + +BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32) +{ + ConstantUsageFloat32Test(armnn::Compute::CpuRef); +} + +#if ARMCOMPUTENEON_ENABLED +BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32) +{ + ConstantUsageFloat32Test(armnn::Compute::CpuAcc); +} +#endif + +#if ARMCOMPUTECL_ENABLED +BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32) +{ + ConstantUsageFloat32Test(armnn::Compute::GpuAcc); +} +#endif + +BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8) +{ + ConstantUsageUint8Test(armnn::Compute::CpuRef); +} + +BOOST_AUTO_TEST_CASE(TrivialAdd) +{ + // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp + + using namespace armnn; + + // Create runtime in which test will run + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(armnn::Compute::CpuRef)); + + 
// build up the structure of the network + armnn::INetworkPtr net(INetwork::Create()); + + IConnectableLayer* input1 = net->AddInputLayer(0); + IConnectableLayer* input2 = net->AddInputLayer(1); + IConnectableLayer* add = net->AddAdditionLayer(); + IConnectableLayer* output = net->AddOutputLayer(0); + + input1->GetOutputSlot(0).Connect(add->GetInputSlot(0)); + input2->GetOutputSlot(0).Connect(add->GetInputSlot(1)); + add->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + // set the tensors in the network + TensorInfo tensorInfo(TensorShape({3, 4}), DataType::Float32); + input1->GetOutputSlot(0).SetTensorInfo(tensorInfo); + input2->GetOutputSlot(0).SetTensorInfo(tensorInfo); + add->GetOutputSlot(0).SetTensorInfo(tensorInfo); + + // optimize the network + IOptimizedNetworkPtr optNet = Optimize(*net, runtime->GetDeviceSpec()); + + // load it into the runtime + NetworkId netId; + runtime->LoadNetwork(netId, std::move(optNet)); + + // create structures for input & output - matching android nn test + std::vector<float> input1Data + { + 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f + }; + std::vector<float> input2Data + { + 100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f, 900.f, 1000.f, 1100.f, 1200.f + }; + std::vector<float> outputData(12); + + InputTensors inputTensors + { + {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())}, + {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())} + }; + OutputTensors outputTensors + { + {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())} + }; + + // do the inference + runtime->EnqueueWorkload(netId, inputTensors, outputTensors); + + // check the results + BOOST_TEST(outputData[0] == 101); + BOOST_TEST(outputData[1] == 202); + BOOST_TEST(outputData[2] == 303); + BOOST_TEST(outputData[3] == 404); + BOOST_TEST(outputData[4] == 505); + BOOST_TEST(outputData[5] == 606); + BOOST_TEST(outputData[6] == 707); + BOOST_TEST(outputData[7] == 808); + BOOST_TEST(outputData[8] == 909); + BOOST_TEST(outputData[9] == 1010); + BOOST_TEST(outputData[10] == 1111); + BOOST_TEST(outputData[11] == 1212); +} + +BOOST_AUTO_TEST_CASE(MultipleOutputs) +{ + using namespace armnn; + + // Create runtime in which test will run + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(armnn::Compute::CpuRef)); + + // build up the structure of the network + INetworkPtr net(INetwork::Create()); + + IConnectableLayer* input = net->AddInputLayer(0); + + // ReLu1 + ActivationDescriptor activation1Descriptor; + activation1Descriptor.m_Function = ActivationFunction::BoundedReLu; + activation1Descriptor.m_A = 1.f; + activation1Descriptor.m_B = -1.f; + IConnectableLayer* activation1 = net->AddActivationLayer(activation1Descriptor); + + // ReLu6 + ActivationDescriptor activation2Descriptor; + activation2Descriptor.m_Function = ActivationFunction::BoundedReLu; + activation2Descriptor.m_A = 6.0f; + IConnectableLayer* activation2 = net->AddActivationLayer(activation2Descriptor); + + // BoundedReLu(min=2, max=5) + ActivationDescriptor activation3Descriptor; + activation3Descriptor.m_Function = ActivationFunction::BoundedReLu; + activation3Descriptor.m_A = 5.0f; + activation3Descriptor.m_B = 2.0f; + IConnectableLayer* activation3 = net->AddActivationLayer(activation3Descriptor); + + IConnectableLayer* output1 = net->AddOutputLayer(0); + IConnectableLayer* output2 = net->AddOutputLayer(1); + IConnectableLayer* output3 = net->AddOutputLayer(2); + + 
input->GetOutputSlot(0).Connect(activation1->GetInputSlot(0)); + input->GetOutputSlot(0).Connect(activation2->GetInputSlot(0)); + input->GetOutputSlot(0).Connect(activation3->GetInputSlot(0)); + + activation1->GetOutputSlot(0).Connect(output1->GetInputSlot(0)); + activation2->GetOutputSlot(0).Connect(output2->GetInputSlot(0)); + activation3->GetOutputSlot(0).Connect(output3->GetInputSlot(0)); + + // set the tensors in the network + TensorInfo tensorInfo(TensorShape({ 10 }), DataType::Float32); + input->GetOutputSlot(0).SetTensorInfo(tensorInfo); + activation1->GetOutputSlot(0).SetTensorInfo(tensorInfo); + activation2->GetOutputSlot(0).SetTensorInfo(tensorInfo); + activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo); + + // optimize the network + IOptimizedNetworkPtr optNet = Optimize(*net, runtime->GetDeviceSpec()); + + // load it into the runtime + NetworkId netId; + runtime->LoadNetwork(netId, std::move(optNet)); + + // create structures for input & output + const std::vector<float> inputData{ 3.f, 5.f, 2.f, 3.f, 7.f, 0.f, -2.f, -1.f, 3.f, 3.f }; + + std::vector<float> output1Data(inputData.size()); + std::vector<float> output2Data(inputData.size()); + std::vector<float> output3Data(inputData.size()); + + InputTensors inputTensors + { + {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())} + }; + OutputTensors outputTensors + { + {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), output1Data.data())}, + {1,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 1), output2Data.data())}, + {2,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 2), output3Data.data())} + }; + + // do the inference + runtime->EnqueueWorkload(netId, inputTensors, outputTensors); + + // check the results + BOOST_TEST(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1 + BOOST_TEST(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6 + BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5] +} + +#if ARMCOMPUTENEON_ENABLED +BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork) +{ + using namespace armnn; + + // Create runtime in which test will run + // Note we don't allow falling back to CpuRef if an operation (excluding inputs, outputs, etc.) isn't supported + armnn::IRuntime::CreationOptions options(armnn::Compute::CpuAcc); + options.m_UseCpuRefAsFallback = false; + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); + + // build up the structure of the network + INetworkPtr net(INetwork::Create()); + + IConnectableLayer* input = net->AddInputLayer(0); + + // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so LoadNetwork will fail. + NormalizationDescriptor descriptor; + IConnectableLayer* pooling = net->AddNormalizationLayer(descriptor); + + IConnectableLayer* output = net->AddOutputLayer(0); + + input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); + pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32)); + pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32)); + + // optimize the network + IOptimizedNetworkPtr optNet = Optimize(*net, runtime->GetDeviceSpec()); + + // Load it into the runtime. It should fail. 
+ NetworkId netId; + BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Failure); +} +#endif // ARMCOMPUTENEON_ENABLED + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp new file mode 100644 index 0000000000..473cda1247 --- /dev/null +++ b/src/armnn/test/GraphTests.cpp @@ -0,0 +1,497 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> + +#include "armnn/ArmNN.hpp" +#include "Graph.hpp" +#include "Layer.hpp" +#include "Layers.hpp" +#include "armnn/TypesUtils.hpp" +#include "armnn/Exceptions.hpp" + +#include "GraphUtils.hpp" +#include "backends/CpuTensorHandle.hpp" + +#include <boost/cast.hpp> + +/// checks that first comes before second in the order +bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second) +{ + graph.Print(); + + const auto& order = graph.TopologicalSort(); + + auto firstPos = std::find(order.begin(), order.end(), first); + auto secondPos = std::find(firstPos, order.end(), second); + + return (secondPos != order.end()); +} + +static armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name) +{ + for (auto&& layer : graph) + { + if (layer->GetNameStr() == name) + { + return layer; + } + } + return nullptr; +} + +BOOST_AUTO_TEST_SUITE(Graph) + +BOOST_AUTO_TEST_CASE(ClassGraph) +{ + armnn::Graph graph; + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA")); + BOOST_TEST(GraphHasNamedLayer(graph, "layerA")); +} + +BOOST_AUTO_TEST_CASE(TopologicalSort) +{ + armnn::Graph graph; + + armnn::ActivationDescriptor activationDefaults; + + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA")); + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB")); + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerC")); + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output")); + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD")); + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE")); + + armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA"); + armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB"); + armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC"); + armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output"); + armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE"); + armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD"); + + // simple graph which branches and rejoins + // A + // / \' + // D E + // \ | + // \ B + // \| + // C + layerA->GetOutputSlot(0).Connect(layerD->GetInputSlot(0)); + layerA->GetOutputSlot(0).Connect(layerE->GetInputSlot(0)); + layerE->GetOutputSlot(0).Connect(layerB->GetInputSlot(0)); + layerD->GetOutputSlot(0).Connect(layerC->GetInputSlot(0)); + layerB->GetOutputSlot(0).Connect(layerC->GetInputSlot(1)); + layerC->GetOutputSlot(0).Connect(layerO->GetInputSlot(0)); + + // check order is valid + BOOST_TEST(CheckOrder(graph, layerA, layerD)); + BOOST_TEST(CheckOrder(graph, layerA, layerE)); + BOOST_TEST(CheckOrder(graph, layerD, layerC)); + BOOST_TEST(CheckOrder(graph, layerE, layerB)); + BOOST_TEST(CheckOrder(graph, layerB, layerC)); +} + +BOOST_AUTO_TEST_CASE(InsertNewLayer) +{ + armnn::Graph graph; + armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32); + + 
std::vector<armnn::Layer*> order; + + armnn::ActivationDescriptor activationDefaults; + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA")); + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB")); + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC")); + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD")); + BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output")); + + armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA"); + armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB"); + armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC"); + armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD"); + armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output"); + + // A + // / \' + // B C + // \ / + // D + layerA->GetOutputSlot(0).SetTensorInfo(tensorInfo); + layerB->GetOutputSlot(0).SetTensorInfo(tensorInfo); + layerC->GetOutputSlot(0).SetTensorInfo(tensorInfo); + layerD->GetOutputSlot(0).SetTensorInfo(tensorInfo); + + layerA->GetOutputSlot(0).Connect(layerB->GetInputSlot(0)); + layerA->GetOutputSlot(0).Connect(layerC->GetInputSlot(0)); + layerB->GetOutputSlot(0).Connect(layerD->GetInputSlot(0)); + layerC->GetOutputSlot(0).Connect(layerD->GetInputSlot(1)); + layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0)); + + // check order is valid + BOOST_TEST(CheckOrder(graph, layerA, layerB)); + BOOST_TEST(CheckOrder(graph, layerA, layerC)); + BOOST_TEST(CheckOrder(graph, layerB, layerD)); + BOOST_TEST(CheckOrder(graph, layerC, layerD)); + + // A + // / \' + // B C + // \ | + // \ E + // \| + // D + BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerD->GetInputSlot(1), + activationDefaults, + "layerE")); + + armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE"); + + // check order is valid + BOOST_TEST(CheckOrder(graph, layerA, layerB)); + BOOST_TEST(CheckOrder(graph, layerA, layerC)); + BOOST_TEST(CheckOrder(graph, layerB, layerD)); + BOOST_TEST(CheckOrder(graph, layerC, layerE)); + BOOST_TEST(CheckOrder(graph, layerE, layerD)); + + // A + // /| + // / F + // / | + // B C + // \ | + // \ E + // \| + // D + BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetInputSlot(0), + activationDefaults, + "layerF")); + + armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF"); + + // check order is valid + BOOST_TEST(CheckOrder(graph, layerA, layerB)); + BOOST_TEST(CheckOrder(graph, layerA, layerF)); + BOOST_TEST(CheckOrder(graph, layerF, layerC)); + BOOST_TEST(CheckOrder(graph, layerB, layerD)); + BOOST_TEST(CheckOrder(graph, layerC, layerE)); + BOOST_TEST(CheckOrder(graph, layerE, layerD)); +} + +namespace +{ + using Edge = std::pair<const armnn::Layer*, const armnn::Layer*>; +} + +static std::vector<Edge> GetEdgeList(const armnn::Graph& graph) +{ + std::vector<Edge> edges; + + for (auto&& srcLayer: graph) + { + const unsigned int numOutputSlots = srcLayer->GetNumOutputSlots(); + for (unsigned int s = 0; s < numOutputSlots; ++s) + { + const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(s); + const unsigned int numConnections = outputSlot.GetNumConnections(); + for (unsigned int c = 0; c < numConnections; ++c) + { + auto inputSlot = boost::polymorphic_downcast<const armnn::InputSlot*>(outputSlot.GetConnection(c)); + edges.emplace_back(srcLayer, &inputSlot->GetOwningLayer()); + } + } + } + + return edges; +} + +static void 
TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armnn::Graph& origGraph) +{ + std::vector<Edge> origEdges = GetEdgeList(origGraph); + std::vector<Edge> newEdges = GetEdgeList(graph); + + // Adding copy layers should not produce any duplicate edges + { + std::vector<Edge> sortedNewEdges = newEdges; + std::sort(sortedNewEdges.begin(), sortedNewEdges.end()); + + auto last = std::unique(sortedNewEdges.begin(), sortedNewEdges.end()); + BOOST_CHECK_MESSAGE(last == sortedNewEdges.end(), "New graph contains duplicate edges!"); + } + + // Each new edge must be tested + while (!newEdges.empty()) + { + const Edge edge = std::move(newEdges.back()); + newEdges.pop_back(); + + // Edge present in the original graph? + int originalEdge = -1; + for (unsigned int i = 0; i < origEdges.size(); i++) + { + const Edge& origEdge = origEdges[i]; + if (origEdge.first->GetNameStr() == edge.first->GetNameStr() && + origEdge.second->GetNameStr() == edge.second->GetNameStr()) + { + originalEdge = boost::numeric_cast<int>(i); + } + } + + if (originalEdge != -1) + { + // Each vertex should correspond to a layer. + const armnn::Layer* srcLayer = edge.first; + const armnn::Layer* dstLayer = edge.second; + BOOST_TEST(srcLayer); + BOOST_TEST(dstLayer); + + // Both layers must have the same compute device. + if (srcLayer && dstLayer) + { + BOOST_TEST((srcLayer->GetComputeDevice() == dstLayer->GetComputeDevice())); + } + + // Mark edge in original graph as observed (by deleting it) + origEdges.erase(origEdges.begin() + originalEdge); + } + else + { + // Edge did not exist in the original graph. + // It must then be an edge connecting a layer and a copy layer. + const armnn::Layer* srcLayer = edge.first; + const armnn::Layer* dstLayer = edge.second; + + if (srcLayer == nullptr || dstLayer == nullptr) + { + BOOST_ERROR("At least one of the two ends of a new edge (" << edge.first << ", " << edge.second << ") " + "introduced after adding copy layers to a graph is not known to the graph"); + continue; + } + + // One and only one of the two layers referenced by the edge should be present in the original graph. + const bool srcLayerInOrigGraph = GraphHasNamedLayer(origGraph, edge.first->GetNameStr()); + const bool dstLayerInOrigGraph = GraphHasNamedLayer(origGraph, edge.second->GetNameStr()); + + if (srcLayerInOrigGraph == dstLayerInOrigGraph) + { + BOOST_ERROR("A new edge (" + << edge.first->GetName() + << ", " + << edge.second->GetName() + << ") introduced after adding copy " + "layers to a graph is invalid. One of the ends should be present in the original " + "graph and the other should not, but " + << (srcLayerInOrigGraph ? "both are" : "neither is")); + continue; + } + + const armnn::Layer* copyLayer = srcLayerInOrigGraph ? edge.second : edge.first; + const armnn::Layer* nonCopyLayer = srcLayerInOrigGraph ? srcLayer : dstLayer; + + // Find all edges connecting the copy layer to other layers + std::vector<Edge> adjEdges; + auto it = newEdges.begin(); + while (it != newEdges.end()) + { + Edge& newEdge = *it; + if (copyLayer == (srcLayerInOrigGraph ?
newEdge.first : newEdge.second)) + { + adjEdges.push_back(newEdge); + + // Since the adjacent edge is immediately tested below, no need to consider it afterwards + it = newEdges.erase(it); + } + else + { + it++; + } + } + + if (adjEdges.empty()) + { + BOOST_ERROR("An edge connecting a layer and a copy layer exists, (" << edge.first << ", " << + edge.second << "), but no other edges connecting the copy layer '" << copyLayer->GetName() + << "' to other layers could be found"); + continue; + } + + // Test adjacent edges now + for (const Edge& adjEdge : adjEdges) + { + // The adjacent edge must connect the copy layer to another layer + const armnn::Layer* adjLayer = srcLayerInOrigGraph ? adjEdge.second : adjEdge.first; + + if (!adjLayer) + { + BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to an edge " + "connecting a layer and a copy layer, (" << edge.first << ", " << edge.second << "), " + "but its non-copy end does not " + "correspond to a layer known to the graph"); + continue; + } + + // Both layers must have different compute devices + BOOST_TEST((nonCopyLayer->GetComputeDevice() != adjLayer->GetComputeDevice())); + + // There must exist an edge connecting both layers directly in the original graph + { + const armnn::Layer* origEdgeN1 = srcLayerInOrigGraph ? nonCopyLayer : adjLayer; + const armnn::Layer* origEdgeN2 = srcLayerInOrigGraph ? adjLayer : nonCopyLayer; + auto origEdgeIter = std::find(origEdges.begin(), origEdges.end(), + Edge(origEdgeN1, origEdgeN2)); + + if (origEdgeIter != origEdges.end()) + { + origEdges.erase(origEdgeIter); + } + else + { + BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to an " + "edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second << + "), but there is no edge connecting the layers in the original graph"); + } + } + } + } + } + + BOOST_TEST(origEdges.empty(), "Not all of the edges in the original graph correspond to paths in the new graph"); +} + +struct CopyLayersFixture +{ + CopyLayersFixture() + { + using namespace armnn; + using namespace std; + + Layer* const inputLayer = AddLayer<InputLayer>(0, "input"); + inputLayer->SetComputeDevice(Compute::CpuRef); + + Convolution2dDescriptor convolutionDefaults; + Layer* const convLayer1 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv1"); + convLayer1->SetComputeDevice(Compute::CpuRef); + + inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0)); + + Layer* const convLayer2 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv2"); + convLayer2->SetComputeDevice(Compute::CpuRef); + + convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0)); + + armnn::OriginsDescriptor mergerDefaults(2); + Layer* const mergerLayer = AddLayer<MergerLayer>(mergerDefaults, "merger"); + mergerLayer->SetComputeDevice(armnn::Compute::CpuRef); + + convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0)); + convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1)); + + armnn::ActivationDescriptor activationDefaults; + Layer* const actLayer = AddLayer<ActivationLayer>(activationDefaults, "act"); + actLayer->SetComputeDevice(armnn::Compute::CpuRef); + + mergerLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0)); + + armnn::SoftmaxDescriptor softmaxDefaults; + Layer* const softmaxLayer = AddLayer<SoftmaxLayer>(softmaxDefaults, "softmax"); + softmaxLayer->SetComputeDevice(armnn::Compute::CpuRef); + +
actLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0)); + + Layer* const outputLayer = AddLayer<OutputLayer>(0, "output"); + outputLayer->SetComputeDevice(armnn::Compute::CpuRef); + + softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + } + + armnn::TensorInfo m_TensorDesc; + armnn::Graph m_Graph; + +private: + + template <typename LayerType, typename... Args> + LayerType* AddLayer(Args&&... args) + { + LayerType* const layer = m_Graph.AddLayer<LayerType>(std::forward<Args>(args)...); + + for (auto slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot) + { + slot->SetTensorInfo(m_TensorDesc); + } + + return layer; + }; +}; + +BOOST_FIXTURE_TEST_CASE(AddCopyLayers, CopyLayersFixture) +{ + const armnn::Graph origGraph(m_Graph); + m_Graph.AddCopyLayers(); + + TestGraphAfterAddingCopyLayers(m_Graph, origGraph); +} + +BOOST_FIXTURE_TEST_CASE(AddCopyLayersSeveralTimes, CopyLayersFixture) +{ + m_Graph.AddCopyLayers(); + + // Calling AddCopyLayers() several times should not change the connections + const std::vector<Edge> edges = GetEdgeList(m_Graph); + for (int i = 0; i < 4; ++i) + { + m_Graph.AddCopyLayers(); + const std::vector<Edge> otherEdges = GetEdgeList(m_Graph); + BOOST_TEST((edges == otherEdges)); + } +} + +BOOST_AUTO_TEST_CASE(CopyLayersAddedBetweenSameLayersHaveDifferentNames) +{ + armnn::Graph graph; + + armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input"); + inputLayer->SetComputeDevice(armnn::Compute::CpuRef); + + armnn::ViewsDescriptor splitterDesc(2); + armnn::SplitterLayer* const splitterLayer = graph.AddLayer<armnn::SplitterLayer>(splitterDesc, "splitter"); + splitterLayer->SetComputeDevice(armnn::Compute::GpuAcc); + + armnn::AdditionLayer* const additionLayer = graph.AddLayer<armnn::AdditionLayer>("addition"); + additionLayer->SetComputeDevice(armnn::Compute::CpuRef); + + armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output"); + outputLayer->SetComputeDevice(armnn::Compute::CpuRef); + + inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0)); + splitterLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0)); + splitterLayer->GetOutputSlot(1).Connect(additionLayer->GetInputSlot(1)); + additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + graph.AddCopyLayers(); + + std::vector<Edge> edges = GetEdgeList(graph); + BOOST_CHECK(edges.size() == 7u); + std::sort(edges.begin(), edges.end()); + auto last = std::unique(edges.begin(), edges.end()); + BOOST_CHECK_MESSAGE(last == edges.end(), "Found duplicated edges after AddCopyLayers()"); +} + +BOOST_AUTO_TEST_CASE(DuplicateLayerNames) +{ + armnn::Graph graph; + + armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "layer"); + inputLayer->SetComputeDevice(armnn::Compute::CpuRef); + + armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "layer"); + outputLayer->SetComputeDevice(armnn::Compute::CpuRef); + + inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + auto it = graph.TopologicalSort().begin(); + BOOST_TEST(((*it)->GetType() == armnn::LayerType::Input)); + BOOST_TEST(((*std::next(it))->GetType() == armnn::LayerType::Output)); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/test/GraphUtils.hpp b/src/armnn/test/GraphUtils.hpp new file mode 100644 index 0000000000..3ff7d2f67b --- /dev/null +++ b/src/armnn/test/GraphUtils.hpp @@ -0,0 +1,24 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Graph.hpp" +#include <string> + +namespace +{ + +bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name) +{ + for (auto&& layer : graph) + { + if (layer->GetName() == name) + { + return true; + } + } + return false; +} +}
\ No newline at end of file diff --git a/src/armnn/test/Network_test.cpp b/src/armnn/test/Network_test.cpp new file mode 100644 index 0000000000..523d47b169 --- /dev/null +++ b/src/armnn/test/Network_test.cpp @@ -0,0 +1,425 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> + +#include "armnn/ArmNN.hpp" +#include "Network.hpp" +#include "Graph.hpp" +#include "backends/RefWorkloadFactory.hpp" + +#include "GraphUtils.hpp" + +namespace +{ + +bool AreAllLayerInputSlotsConnected(const armnn::IConnectableLayer& layer) +{ + bool allConnected = true; + for (unsigned int i = 0; i < layer.GetNumInputSlots(); ++i) + { + const bool inputConnected = layer.GetInputSlot(i).GetConnection() != nullptr; + allConnected &= inputConnected; + } + return allConnected; +} + +} + +BOOST_AUTO_TEST_SUITE(Network) + +BOOST_AUTO_TEST_CASE(NetworkBasic) +{ + armnn::Network net; + BOOST_TEST(net.PrintGraph() == armnn::Status::Success); +} + +BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForINetwork) +{ + armnn::Network net; + armnn::INetwork& inet = net; + inet.AddInputLayer(0); + inet.AddAdditionLayer(); + inet.AddActivationLayer(armnn::ActivationDescriptor()); + inet.AddOutputLayer(0); +} + +BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForNetwork) +{ + armnn::Network net; + net.AddInputLayer(0); + net.AddAdditionLayer(); + net.AddActivationLayer(armnn::ActivationDescriptor()); + net.AddOutputLayer(0); +} + +BOOST_AUTO_TEST_CASE(NetworkModification) +{ + armnn::Network net; + + armnn::IConnectableLayer* const inputLayer = net.AddInputLayer(0, "input layer"); + BOOST_TEST(inputLayer); + + unsigned int dims[] = { 10,1,1,1 }; + std::vector<float> convWeightsData(10); + armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), convWeightsData); + + armnn::Convolution2dDescriptor convDesc2d; + armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, weights, "conv layer"); + BOOST_TEST(convLayer); + + inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0)); + + armnn::FullyConnectedDescriptor fullyConnectedDesc; + armnn::IConnectableLayer* const fullyConnectedLayer = net.AddFullyConnectedLayer(fullyConnectedDesc, + weights, + "fully connected"); + BOOST_TEST(fullyConnectedLayer); + + convLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0)); + + armnn::Pooling2dDescriptor pooling2dDesc; + armnn::IConnectableLayer* const poolingLayer = net.AddPooling2dLayer(pooling2dDesc, "pooling2d"); + BOOST_TEST(poolingLayer); + + fullyConnectedLayer->GetOutputSlot(0).Connect(poolingLayer->GetInputSlot(0)); + + armnn::ActivationDescriptor activationDesc; + armnn::IConnectableLayer* const activationLayer = net.AddActivationLayer(activationDesc, "activation"); + BOOST_TEST(activationLayer); + + poolingLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0)); + + armnn::NormalizationDescriptor normalizationDesc; + armnn::IConnectableLayer* const normalizationLayer = net.AddNormalizationLayer(normalizationDesc, "normalization"); + BOOST_TEST(normalizationLayer); + + activationLayer->GetOutputSlot(0).Connect(normalizationLayer->GetInputSlot(0)); + + armnn::SoftmaxDescriptor softmaxDesc; + armnn::IConnectableLayer* const softmaxLayer = net.AddSoftmaxLayer(softmaxDesc, "softmax"); + BOOST_TEST(softmaxLayer); + + normalizationLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0)); + + armnn::BatchNormalizationDescriptor batchNormDesc; + + 
armnn::TensorInfo tensorInfo({ 1 }, armnn::DataType::Float32); + std::vector<float> data(tensorInfo.GetNumBytes() / sizeof(float)); + armnn::ConstTensor invalidTensor(tensorInfo, data); + + armnn::IConnectableLayer* const batchNormalizationLayer = net.AddBatchNormalizationLayer(batchNormDesc, + invalidTensor, + invalidTensor, + invalidTensor, + invalidTensor, + "batch norm"); + BOOST_TEST(batchNormalizationLayer); + + softmaxLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0)); + + armnn::IConnectableLayer* const additionLayer = net.AddAdditionLayer("addition"); + BOOST_TEST(additionLayer); + + batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0)); + batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1)); + + armnn::IConnectableLayer* const multiplicationLayer = net.AddMultiplicationLayer("multiplication"); + BOOST_TEST(multiplicationLayer); + + additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0)); + additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(1)); + + armnn::IConnectableLayer* const outputLayer = net.AddOutputLayer(0, "output layer"); + BOOST_TEST(outputLayer); + + multiplicationLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + //Test that all layers are present in the graph + BOOST_TEST(net.GetGraph().GetNumLayers() == 11); + + //Test that the vertices exist and have correct names + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "input layer")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "conv layer")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "fully connected")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "pooling2d")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "activation")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "normalization")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "softmax")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "batch norm")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "addition")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "multiplication")); + BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "output layer")); + + auto checkOneOutputToOneInputConnection = [] + (const armnn::IConnectableLayer* const srcLayer, + const armnn::IConnectableLayer* const tgtLayer, + int expectedSrcNumInputs = 1, + int expectedDstNumOutputs = 1) + { + BOOST_TEST(srcLayer->GetNumInputSlots() == expectedSrcNumInputs); + BOOST_TEST(srcLayer->GetNumOutputSlots() == 1); + BOOST_TEST(tgtLayer->GetNumInputSlots() == 1); + BOOST_TEST(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs); + + BOOST_TEST(srcLayer->GetOutputSlot(0).GetNumConnections() == 1); + BOOST_TEST(srcLayer->GetOutputSlot(0).GetConnection(0) == &tgtLayer->GetInputSlot(0)); + BOOST_TEST(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(0).GetConnection()); + }; + auto checkOneOutputToTwoInputsConnections = [] + (const armnn::IConnectableLayer* const srcLayer, + const armnn::IConnectableLayer* const tgtLayer, + int expectedSrcNumInputs, + int expectedDstNumOutputs = 1) + { + BOOST_TEST(srcLayer->GetNumInputSlots() == expectedSrcNumInputs); + BOOST_TEST(srcLayer->GetNumOutputSlots() == 1); + BOOST_TEST(tgtLayer->GetNumInputSlots() == 2); + BOOST_TEST(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs); + + BOOST_TEST(srcLayer->GetOutputSlot(0).GetNumConnections() == 2); + for (unsigned int i = 0; i < srcLayer->GetOutputSlot(0).GetNumConnections(); ++i) + { + BOOST_TEST(srcLayer->GetOutputSlot(0).GetConnection(i) == 
&tgtLayer->GetInputSlot(i)); + BOOST_TEST(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(i).GetConnection()); + } + }; + + BOOST_TEST(AreAllLayerInputSlotsConnected(*convLayer)); + BOOST_TEST(AreAllLayerInputSlotsConnected(*fullyConnectedLayer)); + BOOST_TEST(AreAllLayerInputSlotsConnected(*poolingLayer)); + BOOST_TEST(AreAllLayerInputSlotsConnected(*activationLayer)); + BOOST_TEST(AreAllLayerInputSlotsConnected(*normalizationLayer)); + BOOST_TEST(AreAllLayerInputSlotsConnected(*softmaxLayer)); + BOOST_TEST(AreAllLayerInputSlotsConnected(*batchNormalizationLayer)); + BOOST_TEST(AreAllLayerInputSlotsConnected(*additionLayer)); + BOOST_TEST(AreAllLayerInputSlotsConnected(*multiplicationLayer)); + BOOST_TEST(AreAllLayerInputSlotsConnected(*outputLayer)); + + // Check connectivity + checkOneOutputToOneInputConnection(inputLayer, convLayer, 0); + checkOneOutputToOneInputConnection(convLayer, fullyConnectedLayer); + checkOneOutputToOneInputConnection(fullyConnectedLayer, poolingLayer); + checkOneOutputToOneInputConnection(poolingLayer, activationLayer); + checkOneOutputToOneInputConnection(activationLayer, normalizationLayer); + checkOneOutputToOneInputConnection(normalizationLayer, softmaxLayer); + checkOneOutputToOneInputConnection(softmaxLayer, batchNormalizationLayer); + checkOneOutputToTwoInputsConnections(batchNormalizationLayer, additionLayer, 1); + checkOneOutputToTwoInputsConnections(additionLayer, multiplicationLayer, 2); + checkOneOutputToOneInputConnection(multiplicationLayer, outputLayer, 2, 0); +} + +BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMerger) +{ + armnn::Network net; + + // Add an input layer and an input tensor descriptor. + armnn::IConnectableLayer* inputLayer = net.AddInputLayer(0, "input layer"); + BOOST_TEST(inputLayer); + + // Add a splitter layer + armnn::ViewsDescriptor splitterDesc(2,4); + + armnn::IConnectableLayer* splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer"); + BOOST_TEST(splitterLayer); + + inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0)); + + // Add a softmax layer 1 + armnn::SoftmaxDescriptor softmaxDescriptor; + armnn::IConnectableLayer* softmaxLayer1 = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1"); + BOOST_TEST(softmaxLayer1); + + splitterLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0)); + + // Add a softmax layer 2 + armnn::IConnectableLayer* softmaxLayer2 = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2"); + BOOST_TEST(softmaxLayer2); + + splitterLayer->GetOutputSlot(1).Connect(softmaxLayer2->GetInputSlot(0)); + + // Add a merger layer + armnn::OriginsDescriptor mergerDesc(2, 4); + + armnn::IConnectableLayer* mergerLayer = net.AddMergerLayer(mergerDesc, "merger layer"); + BOOST_TEST(mergerLayer); + + softmaxLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0)); + softmaxLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1)); + + // Add an output layer + armnn::IConnectableLayer* outputLayer = net.AddOutputLayer(0, "output layer"); + BOOST_TEST(outputLayer); + + mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + BOOST_TEST(splitterLayer->GetNumOutputSlots() == 2); + BOOST_TEST(splitterLayer->GetOutputSlot(0).GetConnection(0) == &softmaxLayer1->GetInputSlot(0)); + BOOST_TEST(&splitterLayer->GetOutputSlot(0) == softmaxLayer1->GetInputSlot(0).GetConnection()); + BOOST_TEST(splitterLayer->GetOutputSlot(1).GetConnection(0) == &softmaxLayer2->GetInputSlot(0)); + BOOST_TEST(&splitterLayer->GetOutputSlot(1) == 
softmaxLayer2->GetInputSlot(0).GetConnection()); + + BOOST_TEST(mergerLayer->GetNumInputSlots() == 2); + BOOST_TEST(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &mergerLayer->GetInputSlot(0)); + BOOST_TEST(&softmaxLayer1->GetOutputSlot(0) == mergerLayer->GetInputSlot(0).GetConnection()); + BOOST_TEST(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &mergerLayer->GetInputSlot(1)); + BOOST_TEST(&softmaxLayer2->GetOutputSlot(0) == mergerLayer->GetInputSlot(1).GetConnection()); +} + +BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition) +{ + armnn::Network net; + + // Add an input layer and an input tensor descriptor. + armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer"); + BOOST_TEST(layer); + + // Add a splitter layer + armnn::ViewsDescriptor splitterDesc(2,4); + + armnn::IConnectableLayer* const splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer"); + BOOST_TEST(splitterLayer); + + layer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0)); + + // Add a softmax layer 1 + armnn::SoftmaxDescriptor softmaxDescriptor; + armnn::IConnectableLayer* const softmax1Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1"); + BOOST_TEST(softmax1Layer); + + splitterLayer->GetOutputSlot(0).Connect(softmax1Layer->GetInputSlot(0)); + + // Add a softmax layer 2 + armnn::IConnectableLayer* const softmax2Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2"); + BOOST_TEST(softmax2Layer); + + splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0)); + + // Add addition layer + layer = net.AddAdditionLayer("add layer"); + BOOST_TEST(layer); + + softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0)); + softmax2Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(1)); + + // Add an output layer + armnn::IConnectableLayer* prevLayer = layer; + layer = net.AddOutputLayer(0, "output layer"); + + prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0)); + + BOOST_TEST(layer); +} + +BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMultiplication) +{ + armnn::Network net; + + // Add an input layer and an input tensor descriptor. 
+ armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer"); + BOOST_TEST(layer); + + // Add a splitter layer + armnn::ViewsDescriptor splitterDesc(2,4); + armnn::IConnectableLayer* const splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer"); + BOOST_TEST(splitterLayer); + + layer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0)); + + // Add a softmax layer 1 + armnn::SoftmaxDescriptor softmaxDescriptor; + armnn::IConnectableLayer* const softmax1Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1"); + BOOST_TEST(softmax1Layer); + + splitterLayer->GetOutputSlot(0).Connect(softmax1Layer->GetInputSlot(0)); + + // Add a softmax layer 2 + armnn::IConnectableLayer* const softmax2Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2"); + BOOST_TEST(softmax2Layer); + + splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0)); + + // Add multiplication layer + layer = net.AddMultiplicationLayer("multiplication layer"); + BOOST_TEST(layer); + + softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0)); + softmax2Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(1)); + + // Add an output layer + armnn::IConnectableLayer* prevLayer = layer; + layer = net.AddOutputLayer(0, "output layer"); + BOOST_TEST(layer); + + prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0)); +} + +BOOST_AUTO_TEST_CASE(ValidateWorkloads) +{ + const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32); + + armnn::Network net; + + armnn::NormalizationDescriptor nmDesc; + armnn::ActivationDescriptor acDesc; + + // in + // | + // nm + // / | + // ac | + // \ | + // ml + // | + // sm + // | + // ot + armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in"); + layer->GetOutputSlot(0).SetTensorInfo(desc); + + armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm"); + + layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0)); + normLayer->GetOutputSlot(0).SetTensorInfo(desc); + + layer = net.AddActivationLayer(acDesc, "ac"); + + normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0)); + layer->GetOutputSlot(0).SetTensorInfo(desc); + + armnn::IConnectableLayer* prevLayer = layer; + layer = net.AddMultiplicationLayer("ml"); + + prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0)); + normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1)); + layer->GetOutputSlot(0).SetTensorInfo(desc); + + prevLayer = layer; + armnn::SoftmaxDescriptor softmaxDescriptor; + layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm"); + + prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0)); + layer->GetOutputSlot(0).SetTensorInfo(desc); + + prevLayer = layer; + layer = net.AddOutputLayer(0, "ot"); + + prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0)); + + armnn::DeviceSpec spec; + spec.DefaultComputeDevice = armnn::Compute::CpuRef; + + armnn::IOptimizedNetworkPtr optNet = Optimize(net, spec); + static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph().AllocateDynamicBuffers(); + + // validate workloads + armnn::RefWorkloadFactory fact; + for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph()) + { + BOOST_CHECK_NO_THROW( + layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact)); + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp new file mode 100644 index 0000000000..117df5e55a --- /dev/null +++ b/src/armnn/test/RuntimeTests.cpp @@ -0,0 +1,190 @@ +// +// Copyright © 2017 Arm Ltd. 
All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> + +#include "armnn/TypesUtils.hpp" + +#include "armnn/IRuntime.hpp" +#include "armnn/INetwork.hpp" +#include "armnn/Descriptors.hpp" +#include "Runtime.hpp" + +#ifdef WITH_VALGRIND +#include "valgrind/memcheck.h" +#endif + +#include <boost/core/ignore_unused.hpp> + +namespace armnn +{ + +void RuntimeLoadedNetworksReserve(armnn::Runtime* runtime) +{ + runtime->m_LoadedNetworks.reserve(1); +} + +} + +BOOST_AUTO_TEST_SUITE(Runtime) + +BOOST_AUTO_TEST_CASE(RuntimeUnloadNetwork) +{ + // build 2 mock-networks and load them into the runtime + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(armnn::Compute::CpuRef)); + + // mock network 1 + armnn::NetworkId networkIdentifier1 = 1; + armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create()); + mockNetwork1->AddInputLayer(0, "test layer"); + runtime->LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, runtime->GetDeviceSpec())); + + // mock network 2 + armnn::NetworkId networkIdentifier2 = 2; + armnn::INetworkPtr mockNetwork2(armnn::INetwork::Create()); + mockNetwork2->AddInputLayer(0, "test layer"); + runtime->LoadNetwork(networkIdentifier2, Optimize(*mockNetwork2, runtime->GetDeviceSpec())); + + // unload one by its networkID + BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Success); + + BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Failure); +} + +#if defined(ARMCOMPUTECL_ENABLED) && defined(WITH_VALGRIND) +BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage) +{ + // From documentation: + + // This means that no pointer to the block can be found. The block is classified as "lost", + // because the programmer could not possibly have freed it at program exit, since no pointer to it exists. + unsigned long leakedBefore = 0; + unsigned long leakedAfter = 0; + + // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at, + // the programmer could, at least in principle, have freed it before program exit. 
+ // We want to test this in case memory is not freed as early as it could have been + unsigned long reachableBefore = 0; + unsigned long reachableAfter = 0; + + // needed as out params but we don't test them + unsigned long dubious = 0; + unsigned long suppressed = 0; + + // ensure that runtime is large enough before checking for memory leaks + // otherwise when loading the network it will automatically reserve memory that won't be released until destruction + armnn::NetworkId networkIdentifier; + armnn::Runtime runtime(armnn::Compute::GpuAcc); + armnn::RuntimeLoadedNetworksReserve(&runtime); + + // check for leaks before we load the network and record them so that we can see the delta after unloading + VALGRIND_DO_QUICK_LEAK_CHECK; + VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed); + + // build a mock-network and load it into the runtime + { + armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32); + armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32); + + armnn::INetworkPtr mockNetwork(armnn::INetwork::Create()); + + armnn::IConnectableLayer* input = mockNetwork->AddInputLayer(0, "input"); + armnn::IConnectableLayer* layer = mockNetwork->AddActivationLayer(armnn::ActivationDescriptor(), "test"); + armnn::IConnectableLayer* output = mockNetwork->AddOutputLayer(0, "output"); + + input->GetOutputSlot(0).Connect(layer->GetInputSlot(0)); + layer->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + // set the tensors in the network + input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo); + layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); + + // optimize the network + armnn::IOptimizedNetworkPtr optNet = Optimize(*mockNetwork, runtime.GetDeviceSpec()); + + runtime.LoadNetwork(networkIdentifier, std::move(optNet)); + } + + runtime.UnloadNetwork(networkIdentifier); + + VALGRIND_DO_ADDED_LEAK_CHECK; + VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed); + + // if we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass + BOOST_TEST(leakedBefore == leakedAfter); + + // Allow a reasonable threshold for the after-vs-before difference when running valgrind with the ACL clear cache function. + BOOST_TEST(reachableAfter - reachableBefore < 30000); + + // these are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters + // so they are assigned to, but still considered unused, causing a warning + boost::ignore_unused(dubious); + boost::ignore_unused(suppressed); +} +#endif + +#ifdef WITH_VALGRIND +// run with the following command to get all the amazing output (in the devenv/build folder) :) +// valgrind --leak-check=full --show-leak-kinds=all --log-file=Valgrind_Memcheck_Leak_Report.txt armnn/test/UnitTests +BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak) +{ + // From documentation: + + // This means that no pointer to the block can be found. The block is classified as "lost", + // because the programmer could not possibly have freed it at program exit, since no pointer to it exists. + unsigned long leakedBefore = 0; + unsigned long leakedAfter = 0; + + // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at, + // the programmer could, at least in principle, have freed it before program exit.
+ // We want to test this in case memory is not freed as early as it could have been + unsigned long reachableBefore = 0; + unsigned long reachableAfter = 0; + + // needed as out params but we don't test them + unsigned long dubious = 0; + unsigned long suppressed = 0; + + armnn::NetworkId networkIdentifier1 = 1; + + // ensure that runtime is large enough before checking for memory leaks + // otherwise when loading the network it will automatically reserve memory that won't be released until destruction + armnn::Runtime runtime(armnn::Compute::CpuRef); + armnn::RuntimeLoadedNetworksReserve(&runtime); + + // check for leaks before we load the network and record them so that we can see the delta after unloading + VALGRIND_DO_QUICK_LEAK_CHECK; + VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed); + + // build a mock-network and load it into the runtime + { + unsigned int inputShape[] = {1, 7, 1, 1}; + armnn::TensorInfo inputTensorInfo(4, inputShape, armnn::DataType::Float32); + + std::unique_ptr<armnn::Network> mockNetwork1 = std::make_unique<armnn::Network>(); + mockNetwork1->AddInputLayer(0, "test layer"); + + armnn::DeviceSpec device; + device.DefaultComputeDevice = armnn::Compute::CpuRef; + + runtime.LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, device)); + } + + runtime.UnloadNetwork(networkIdentifier1); + + VALGRIND_DO_ADDED_LEAK_CHECK; + VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed); + + // if we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass + BOOST_TEST(leakedBefore == leakedAfter); + BOOST_TEST(reachableBefore == reachableAfter); + + // these are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters + // so they are assigned to, but still considered unused, causing a warning + boost::ignore_unused(dubious); + boost::ignore_unused(suppressed); +} +#endif + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp new file mode 100644 index 0000000000..e4ff899a4e --- /dev/null +++ b/src/armnn/test/TensorHelpers.hpp @@ -0,0 +1,201 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/TensorFwd.hpp> +#include <boost/test/unit_test.hpp> +#include <boost/multi_array.hpp> +#include <vector> +#include <array> + +#include <boost/assert.hpp> +#include <boost/test/tools/floating_point_comparison.hpp> +#include <boost/random/uniform_real_distribution.hpp> +#include <boost/random/mersenne_twister.hpp> +#include <boost/numeric/conversion/cast.hpp> + +#include "armnn/Tensor.hpp" + +#include "backends/test/QuantizeHelper.hpp" + +#include <cmath> + +constexpr float g_FloatCloseToZeroTolerance = 1.0e-7f; + +template<typename T, bool isQuantized = true> +struct SelectiveComparer +{ + static bool Compare(T a, T b) + { + return (std::max(a, b) - std::min(a, b)) <= 1; + } + +}; + +template<typename T> +struct SelectiveComparer<T, false> +{ + static bool Compare(T a, T b) + { + // if a or b is zero, percent_tolerance does an exact match, so compare to a small, constant tolerance instead + if (a == 0.0f || b == 0.0f) + { + return std::abs(a - b) <= g_FloatCloseToZeroTolerance; + } + // For unquantized floats we use a tolerance of 1%. 
+ boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f)); + return comparer(a, b); + } +}; + +template<typename T> +bool SelectiveCompare(T a, T b) +{ + return SelectiveComparer<T, armnn::IsQuantizedType<T>()>::Compare(a, b); +}; + + + +template <typename T, std::size_t n> +boost::test_tools::predicate_result CompareTensors(const boost::multi_array<T, n>& a, + const boost::multi_array<T, n>& b) +{ + // check they are same shape + for (unsigned int i=0; i<n; i++) + { + if (a.shape()[i] != b.shape()[i]) + { + boost::test_tools::predicate_result res(false); + res.message() << "Different shapes [" + << a.shape()[i] + << "!=" + << b.shape()[i] + << "]"; + return res; + } + } + + // now compare element-wise + + // fun iteration over n dimensions + std::array<unsigned int, n> indices; + for (unsigned int i = 0; i < n; i++) + { + indices[i] = 0; + } + + std::stringstream errorString; + int numFailedElements = 0; + constexpr int maxReportedDifferences = 3; + + while (true) + { + bool comparison = SelectiveCompare(a(indices), b(indices)); + if (!comparison) + { + ++numFailedElements; + + if (numFailedElements <= maxReportedDifferences) + { + if (numFailedElements >= 2) + { + errorString << ", "; + } + errorString << "["; + for (unsigned int i = 0; i < n; ++i) + { + errorString << indices[i]; + if (i != n - 1) + { + errorString << ","; + } + } + errorString << "]"; + + errorString << " (" << +a(indices) << " != " << +b(indices) << ")"; + } + } + + ++indices[n - 1]; + for (unsigned int i=n-1; i>0; i--) + { + if (indices[i] == a.shape()[i]) + { + indices[i] = 0; + ++indices[i - 1]; + } + } + + if (indices[0] == a.shape()[0]) + { + break; + } + } + + boost::test_tools::predicate_result comparisonResult(true); + if (numFailedElements > 0) + { + comparisonResult = false; + comparisonResult.message() << numFailedElements << " different values at: "; + if (numFailedElements > maxReportedDifferences) + { + errorString << ", ... (and " << (numFailedElements - maxReportedDifferences) << " other differences)"; + } + comparisonResult.message() << errorString.str(); + } + + return comparisonResult; +} + + +// Creates a boost::multi_array with shape defined by the given TensorInfo. +template <typename T, std::size_t n> +boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo) +{ + std::array<unsigned int, n> shape; + + for (unsigned int i = 0; i < n; i++) + { + shape[i] = tensorInfo.GetShape()[i]; + } + + return boost::multi_array<T, n>(shape); +} + +// Creates a boost::multi_array with shape defined by the given TensorInfo and contents defined by the given vector. 
+template <typename T, std::size_t n> +boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo, const std::vector<T>& flat) +{ + BOOST_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor"); + + std::array<unsigned int, n> shape; + + for (unsigned int i = 0; i < n; i++) + { + shape[i] = tensorInfo.GetShape()[i]; + } + + boost::const_multi_array_ref<T, n> arrayRef(&flat[0], shape); + return boost::multi_array<T, n>(arrayRef); +} + +template <typename T, std::size_t n> +boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo, + unsigned int seed, + float min = -10.0f, + float max = 10.0f) +{ + boost::random::mt19937 gen(seed); + boost::random::uniform_real_distribution<float> dist(min, max); + + std::vector<float> init(tensorInfo.GetNumElements()); + for (unsigned int i = 0; i < init.size(); i++) + { + init[i] = dist(gen); + } + float qScale = tensorInfo.GetQuantizationScale(); + int32_t qOffset = tensorInfo.GetQuantizationOffset(); + return MakeTensor<T, n>(tensorInfo, QuantizedVector<T>(qScale, qOffset, init)); +} diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp new file mode 100644 index 0000000000..2bb37f4fb8 --- /dev/null +++ b/src/armnn/test/TensorTest.cpp @@ -0,0 +1,146 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> +#include <armnn/Tensor.hpp> + +namespace armnn +{ + +// Add unit test framework for interpreting TensorInfo type +std::ostream& boost_test_print_type(std::ostream& ostr, const TensorInfo& right) +{ + ostr << "TensorInfo[ " + << right.GetNumDimensions() << "," + << right.GetShape()[0] << "," + << right.GetShape()[1] << "," + << right.GetShape()[2] << "," + << right.GetShape()[3] + << " ]" << std::endl; + return ostr; +} + +std::ostream& boost_test_print_type(std::ostream& ostr, const TensorShape& shape) +{ + ostr << "TensorShape[ " + << shape.GetNumDimensions() << "," + << shape[0] << "," + << shape[1] << "," + << shape[2] << "," + << shape[3] + << " ]" << std::endl; + return ostr; +} + +} //namespace armnn +using namespace armnn; + +BOOST_AUTO_TEST_SUITE(Tensor) + +struct TensorInfoFixture +{ + TensorInfoFixture() + { + unsigned int sizes[] = {6,7,8,9}; + m_TensorInfo = TensorInfo(4, sizes, DataType::Float32); + } + ~TensorInfoFixture() {}; + + TensorInfo m_TensorInfo; +}; + +BOOST_FIXTURE_TEST_CASE(ConstructShapeUsingListInitialization, TensorInfoFixture) +{ + TensorShape listInitializedShape{ 6, 7, 8, 9 }; + BOOST_TEST(listInitializedShape == m_TensorInfo.GetShape()); +} + +BOOST_FIXTURE_TEST_CASE(ConstructTensorInfo, TensorInfoFixture) +{ + BOOST_TEST(m_TensorInfo.GetNumDimensions() == 4); + BOOST_TEST(m_TensorInfo.GetShape()[0] == 6); // <= Outer most + BOOST_TEST(m_TensorInfo.GetShape()[1] == 7); + BOOST_TEST(m_TensorInfo.GetShape()[2] == 8); + BOOST_TEST(m_TensorInfo.GetShape()[3] == 9); // <= Inner most +} + +BOOST_FIXTURE_TEST_CASE(CopyConstructTensorInfo, TensorInfoFixture) +{ + TensorInfo copyConstructed(m_TensorInfo); + BOOST_TEST(copyConstructed.GetNumDimensions() == 4); + BOOST_TEST(copyConstructed.GetShape()[0] == 6); + BOOST_TEST(copyConstructed.GetShape()[1] == 7); + BOOST_TEST(copyConstructed.GetShape()[2] == 8); + BOOST_TEST(copyConstructed.GetShape()[3] == 9); +} + +BOOST_FIXTURE_TEST_CASE(TensorInfoEquality, TensorInfoFixture) +{ + TensorInfo copyConstructed(m_TensorInfo); + BOOST_TEST(copyConstructed == m_TensorInfo); 
+} + +BOOST_FIXTURE_TEST_CASE(TensorInfoInequality, TensorInfoFixture) +{ + TensorInfo other; + unsigned int sizes[] = {2,3,4,5}; + other = TensorInfo(4, sizes, DataType::Float32); + + BOOST_TEST(other != m_TensorInfo); +} + +BOOST_FIXTURE_TEST_CASE(TensorInfoAssignmentOperator, TensorInfoFixture) +{ + TensorInfo copy; + copy = m_TensorInfo; + BOOST_TEST(copy == m_TensorInfo); +} + +void CheckTensor(const ConstTensor& t) +{ + t.GetInfo(); +} + +BOOST_AUTO_TEST_CASE(TensorVsConstTensor) +{ + int mutableDatum = 2; + const int immutableDatum = 3; + + armnn::Tensor uninitializedTensor; + armnn::ConstTensor uninitializedTensor2; + + uninitializedTensor2 = uninitializedTensor; + + armnn::Tensor t(TensorInfo(), &mutableDatum); + armnn::ConstTensor ct(TensorInfo(), &immutableDatum); + + // Check that both Tensor and ConstTensor can be passed as a ConstTensor + CheckTensor(t); + CheckTensor(ct); +} + +BOOST_AUTO_TEST_CASE(ModifyTensorInfo) +{ + TensorInfo info; + info.SetShape({ 5, 6, 7, 8 }); + BOOST_TEST((info.GetShape() == TensorShape({ 5, 6, 7, 8 }))); + info.SetDataType(DataType::QuantisedAsymm8); + BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8)); + info.SetQuantizationScale(10.0f); + BOOST_TEST(info.GetQuantizationScale() == 10.0f); + info.SetQuantizationOffset(5); + BOOST_TEST(info.GetQuantizationOffset() == 5); +} + +BOOST_AUTO_TEST_CASE(TensorShapeOperatorBrackets) +{ + TensorShape shape({0,1,2,3}); + // Check version of operator[] which returns an unsigned int + BOOST_TEST(shape[2] == 2); + // Check the version of operator[] which returns a reference + shape[2] = 20; + BOOST_TEST(shape[2] == 20); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnn/test/UnitTests.cpp b/src/armnn/test/UnitTests.cpp new file mode 100644 index 0000000000..0e2f99583f --- /dev/null +++ b/src/armnn/test/UnitTests.cpp @@ -0,0 +1,60 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#define BOOST_TEST_MODULE UnitTests +#include <boost/test/unit_test.hpp> + +#include "UnitTests.hpp" + +struct ConfigureLoggingFixture +{ + ConfigureLoggingFixture() + { + ConfigureLoggingTest(); + } +}; + +BOOST_GLOBAL_FIXTURE(ConfigureLoggingFixture); + +// On Windows, duplicate the boost test logging output to the Visual Studio output window using OutputDebugString. +#if defined(_MSC_VER) + +#include <boost/iostreams/filtering_stream.hpp> +#include <boost/iostreams/tee.hpp> +#include <iostream> +#include <Windows.h> + +using namespace boost::iostreams; +using namespace std; + +struct DebugOutputSink : boost::iostreams::sink +{ + std::streamsize write(const char* s, std::streamsize n) + { + // The given string is not null-terminated, so we need to copy it. + std::string s2(s, boost::numeric_cast<size_t>(n)); + OutputDebugString(s2.c_str()); + return n; + } +}; + +class SetupDebugOutput +{ +public: + SetupDebugOutput() + { + // Send the output to both cout (as standard) and the debug output. + m_OutputStream.push(tee(std::cout)); + m_OutputStream.push(m_DebugOutputSink); + + boost::unit_test::unit_test_log.set_stream(m_OutputStream); + } +private: + filtering_ostream m_OutputStream; + DebugOutputSink m_DebugOutputSink; +}; + +BOOST_GLOBAL_FIXTURE(SetupDebugOutput); + +#endif // defined(_MSC_VER)
\ No newline at end of file diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp new file mode 100644 index 0000000000..040048ad99 --- /dev/null +++ b/src/armnn/test/UnitTests.hpp @@ -0,0 +1,79 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "Logging.hpp" +#include "armnn/Utils.hpp" +#include "backends/RefWorkloadFactory.hpp" +#include "backends/test/LayerTests.hpp" +#include <boost/test/unit_test.hpp> + +inline void ConfigureLoggingTest() +{ + // Configure logging for both the ARMNN library and this test program + armnn::ConfigureLogging(true, true, armnn::LogSeverity::Fatal); + armnnUtils::ConfigureLogging(boost::log::core::get().get(), true, true, armnn::LogSeverity::Fatal); +} + +// The following macros require the caller to have defined FactoryType, with one of the following using statements: +// +// using FactoryType = armnn::RefWorkloadFactory; +// using FactoryType = armnn::ClWorkloadFactory; +// using FactoryType = armnn::NeonWorkloadFactory; + +/// Executes BOOST_TEST on CompareTensors() return value so that the predicate_result message is reported. +/// If the test reports itself as not supported then the tensors are not compared. +/// Additionally this checks that the supportedness reported by the test matches the name of the test. +/// Unsupported tests must be 'tagged' by including "UNSUPPORTED" in their name. +/// This is useful because it clarifies that the feature being tested is not actually supported +/// (a passed test with the name of a feature would imply that feature was supported). +/// If support is added for a feature, the test case will fail because the name incorrectly contains UNSUPPORTED. +/// If support is removed for a feature, the test case will fail because the name doesn't contain UNSUPPORTED. +template <typename T, std::size_t n> +void CompareTestResultIfSupported(const std::string& testName, LayerTestResult<T, n> testResult) +{ + bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos; + BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.supported, + "The test name does not match the supportedness it is reporting"); + if (testResult.supported) + { + BOOST_TEST(CompareTensors(testResult.output, testResult.outputExpected)); + } +} + +template<typename FactoryType, typename TFuncPtr, typename... Args> +void RunTestFunction(const char* testName, TFuncPtr testFunction, Args... args) +{ + FactoryType workloadFactory; + auto testResult = (*testFunction)(workloadFactory, args...); + CompareTestResultIfSupported(testName, testResult); +} + +#define ARMNN_AUTO_TEST_CASE(TestName, TestFunction, ...) \ + BOOST_AUTO_TEST_CASE(TestName) \ + { \ + RunTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \ + } + +template<typename FactoryType, typename TFuncPtr, typename... Args> +void CompareRefTestFunction(const char* testName, TFuncPtr testFunction, Args... args) +{ + FactoryType workloadFactory; + armnn::RefWorkloadFactory refWorkloadFactory; + auto testResult = (*testFunction)(workloadFactory, refWorkloadFactory, args...); + CompareTestResultIfSupported(testName, testResult); +} + +#define ARMNN_COMPARE_REF_AUTO_TEST_CASE(TestName, TestFunction, ...) \ + BOOST_AUTO_TEST_CASE(TestName) \ + { \ + CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \ + } + +#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(TestName, Fixture, TestFunction, ...) 
\ + BOOST_FIXTURE_TEST_CASE(TestName, Fixture) \ + { \ + CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \ + } diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp new file mode 100644 index 0000000000..11fa51626c --- /dev/null +++ b/src/armnn/test/UtilsTests.cpp @@ -0,0 +1,58 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> + +#include <armnn/Utils.hpp> +#include <armnn/Types.hpp> +#include <armnn/TypesUtils.hpp> +#include <armnn/Descriptors.hpp> + +BOOST_AUTO_TEST_SUITE(Utils) + +BOOST_AUTO_TEST_CASE(DataTypeSize) +{ + BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4); + BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QuantisedAsymm8) == 1); + BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4); +} + +BOOST_AUTO_TEST_CASE(GetDataTypeTest) +{ + BOOST_TEST((armnn::GetDataType<float>() == armnn::DataType::Float32)); + BOOST_TEST((armnn::GetDataType<uint8_t>() == armnn::DataType::QuantisedAsymm8)); + BOOST_TEST((armnn::GetDataType<int32_t>() == armnn::DataType::Signed32)); +} + +BOOST_AUTO_TEST_CASE(PermuteDescriptorWithTooManyMappings) +{ + BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 4u }), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings1d) +{ + BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 1u }), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings2d) +{ + BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 2u, 0u }), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings3d) +{ + BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 3u, 1u }), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings4d) +{ + BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 4u }), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_CASE(PermuteDescriptorWithDuplicatedMappings) +{ + BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 1u, 1u, 0u }), armnn::InvalidArgumentException); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnCaffeParser/CaffeParser.cpp b/src/armnnCaffeParser/CaffeParser.cpp new file mode 100644 index 0000000000..e12badc3a0 --- /dev/null +++ b/src/armnnCaffeParser/CaffeParser.cpp @@ -0,0 +1,1413 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "CaffeParser.hpp" + +#include "armnn/Descriptors.hpp" +#include "armnn/INetwork.hpp" +#include "armnn/Utils.hpp" +#include "armnn/Exceptions.hpp" + +#include "GraphTopologicalSort.hpp" + +#include <boost/numeric/conversion/cast.hpp> +#include <boost/assert.hpp> +#include <boost/format.hpp> +#include <boost/log/trivial.hpp> + +// Caffe +#include "caffe/proto/caffe.pb.h" + +// ProtoBuf +#include <google/protobuf/io/coded_stream.h> +#include <google/protobuf/io/zero_copy_stream.h> +#include <google/protobuf/io/zero_copy_stream_impl.h> +#include <google/protobuf/text_format.h> +#include <google/protobuf/stubs/common.h> +#include <google/protobuf/stubs/once.h> +#include <google/protobuf/io/coded_stream.h> +#include <google/protobuf/wire_format_lite_inl.h> +#include <google/protobuf/descriptor.h> +#include <google/protobuf/generated_message_reflection.h> +#include <google/protobuf/reflection_ops.h> +#include <google/protobuf/wire_format.h> + +#include <cmath> +#include <sstream> +#include <queue> +#include <fcntl.h> + +/// Caffe networks are loaded from protobuf files (binary or text) using the protobuf library and the generated +/// code from caffe.pb.h. This gives us a caffe::NetParameter which is an in-memory version of the file. +/// This contains a flat list of Caffe 'layers' (e.g. convolution, pooling etc.). +/// Each layer has inputs (called "bottoms") and outputs (called "tops"). Data flows from bottom to top. +/// The bottoms of a layer refer to the tops of other layers, not their names. +/// The names of layers seem to be arbitrary (you could rename a layer and the network wouldn't need any other changes). +/// +/// Some layers (e.g. Relu) can be configured so that their top and bottom are both the same. This is called an +/// "in-place" layer and is a Caffe runtime feature used to reduce memory usage by modifying tensors in-place. +/// This isn't relevant to the parser and so we preprocess these layers to convert them to regular layers, to result +/// in a consistent graph structure. 
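+///
+/// As an illustrative fragment (not taken from any particular model), an in-place ReLU such as:
+///
+///     layer { name: "conv1" type: "Convolution" bottom: "data"  top: "conv1" }
+///     layer { name: "relu1" type: "ReLU"        bottom: "conv1" top: "conv1" }
+///     layer { name: "pool1" type: "Pooling"     bottom: "conv1" top: "pool1" }
+///
+/// is rewritten by ResolveInPlaceLayers() into a linear chain in which every top is produced by exactly one layer:
+///
+///     layer { name: "conv1" type: "Convolution" bottom: "data"      top: "conv1_top" }
+///     layer { name: "relu1" type: "ReLU"        bottom: "conv1_top" top: "conv1" }
+///     layer { name: "pool1" type: "Pooling"     bottom: "conv1"     top: "pool1" }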
+ +namespace armnnCaffeParser +{ + +using namespace armnn; +using namespace caffe; +using namespace std; +using namespace google::protobuf::io; + +const std::map<std::string, CaffeParser::OperationParsingFunction> CaffeParser::ms_CaffeLayerNameToParsingFunctions = { + { "Input", &CaffeParser::ParseInputLayer }, + { "Convolution", &CaffeParser::ParseConvLayer }, + { "Pooling", &CaffeParser::ParsePoolingLayer }, + { "ReLU", &CaffeParser::ParseReluLayer }, + { "LRN", &CaffeParser::ParseLRNLayer }, + { "InnerProduct", &CaffeParser::ParseInnerProductLayer }, + { "Softmax", &CaffeParser::ParseSoftmaxLayer }, + { "Eltwise", &CaffeParser::ParseEltwiseLayer }, + { "Concat", &CaffeParser::ParseConcatLayer }, + { "BatchNorm", &CaffeParser::ParseBatchNormLayer }, + { "Scale", &CaffeParser::ParseScaleLayer }, + { "Split", &CaffeParser::ParseSplitLayer }, + { "Dropout", &CaffeParser::ParseDropoutLayer}, +}; + +ICaffeParser* ICaffeParser::CreateRaw() +{ + return new CaffeParser(); +} + +ICaffeParserPtr ICaffeParser::Create() +{ + return ICaffeParserPtr(CreateRaw(), &ICaffeParser::Destroy); +} + +void ICaffeParser::Destroy(ICaffeParser* parser) +{ + delete parser; +} + +CaffeParser::CaffeParser() +: m_Network(nullptr, nullptr) +{ + +} + +void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, unsigned int blobIndex) +{ + if (blobIndex >= boost::numeric_cast<unsigned int>(layerParam.blobs_size())) + { + throw ParseException(boost::str(boost::format("Expected data blob at index %1% in layer %2% not found") + % blobIndex % layerParam.name())); + } + + const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex)); + + if (boost::numeric_cast<size_t>(blob.data_size()) != outData.size()) + { + throw ParseException(boost::str(boost::format( + "Data blob at index %1% in layer %2% has an unexpected size. Expected %3% elements but got %4% elements") + % blobIndex % layerParam.name() % outData.size() % blob.data_size())); + } + + for (unsigned int i = 0; i < outData.size(); ++i) + { + outData[i] = blob.data(boost::numeric_cast<int>(i)); + } +} + +bool IsInRange(unsigned int value, unsigned int min, unsigned int max) +{ + return (value >= min && value <= max) ? 
true : false; +} + +template <typename T> +size_t SizeOfVectorData(const vector<T>& vec) +{ + return vec.size() * sizeof(T); +} + +void ValidateNumInputsOutputs(const caffe::LayerParameter& layerParameter, + unsigned int numInputs, + unsigned int numOutputs) +{ + int numInputsActual = layerParameter.bottom_size(); + if (numInputs != boost::numeric_cast<unsigned int>(numInputsActual)) + { + throw ParseException("Loading layer: invalid number of inputs"); + } + + int numOutputsActual = layerParameter.top_size(); + if (numOutputs != boost::numeric_cast<unsigned int>(numOutputsActual)) + { + throw ParseException("Loading layer: invalid number of outputs"); + } +} + +BindingPointInfo CaffeParser::GetNetworkInputBindingInfo(const std::string& name) const +{ + return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo); +} + +BindingPointInfo CaffeParser::GetNetworkOutputBindingInfo(const std::string& name) const +{ + return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo); +} + +std::pair<armnn::LayerBindingId, armnn::TensorInfo> CaffeParser::GetBindingInfo(const std::string& layerName, + const char* bindingPointDesc, + const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo) +{ + auto it = nameToBindingInfo.find(layerName); + if (it == nameToBindingInfo.end()) + { + throw InvalidArgumentException(boost::str(boost::format("Unknown %1% '%2%'") % bindingPointDesc % layerName)); + } + return it->second; +} + +TensorInfo CaffeParser::BlobShapeToTensorInfo(const caffe::BlobShape& blobShape) const +{ + std::vector<unsigned int> shape; + for (int j = 0; j < blobShape.dim_size(); ++j) + { + shape.push_back(static_cast<unsigned int>(blobShape.dim(j))); + } + + return TensorInfo(boost::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32); +} + +BlobShape TensorDescToBlobShape(const TensorInfo& desc) +{ + BlobShape ret; + for (unsigned int i = 0; i < desc.GetNumDimensions(); ++i) + { + ret.add_dim(i); + ret.set_dim(boost::numeric_cast<int>(i), desc.GetShape()[i]); + } + + return ret; +} + +vector<const LayerParameter*> CaffeParser::GetInputs(const LayerParameter& layerParam) +{ + std::vector<const caffe::LayerParameter*> ret; + ret.reserve(boost::numeric_cast<size_t>(layerParam.bottom_size())); + for (int j = 0; j < layerParam.bottom_size(); ++j) + { + std::string inputName = layerParam.bottom(j); + auto inputIt = m_CaffeLayersByTopName.find(inputName); + if (inputIt == m_CaffeLayersByTopName.end()) + { + throw ParseException( + "Can't find Caffe layer with top called '" + inputName + "', which is listed as an input of '" + + layerParam.name() + "'"); + } + ret.push_back(inputIt->second); + } + + return ret; +} + +void CaffeParser::ParseInputLayer(const LayerParameter& layerParam) +{ + BOOST_ASSERT(layerParam.type() == "Input"); + ValidateNumInputsOutputs(layerParam, 0, 1); + + const InputParameter& param = layerParam.input_param(); + + const armnn::LayerBindingId inputId = boost::numeric_cast<armnn::LayerBindingId>(m_NetworkInputsBindingInfo.size()); + armnn::IConnectableLayer* const inputLayer = m_Network->AddInputLayer(inputId, layerParam.name().c_str()); + + // Decide on the tensor info for this input. This can be specified in the Caffe network but can also + // be overriden by user input (m_inputShapes). + armnn::TensorInfo inputTensorInfo; + + const BlobShape* originalShape = param.shape_size() > 0 && param.shape(0).dim_size() > 0 ? 
+ ¶m.shape(0) : nullptr; + if (originalShape) + { + inputTensorInfo = BlobShapeToTensorInfo(*originalShape); + } + + auto overrideIt = m_InputShapes.find(layerParam.name()); + if (overrideIt != m_InputShapes.end()) + { + const TensorShape& overrideShape = overrideIt->second; + if (originalShape && + ( originalShape->dim(1) != overrideShape[1] + || originalShape->dim(2) != overrideShape[2] + || originalShape->dim(3) != overrideShape[3])) + { + throw ParseException("Parsed input shape for '" + layerParam.name() + + "' is incompatible with the override provided"); + } + inputTensorInfo.SetShape(overrideShape); + } + else if (!originalShape) + { + throw ParseException("No input descriptor given for '" + layerParam.name() + + "' and no input shape found in caffe model"); + } + + TrackInputBinding(inputLayer, inputId, inputTensorInfo); + inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), inputLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseConvLayer(const LayerParameter& layerParam) +{ + BOOST_ASSERT(layerParam.type() == "Convolution"); + ValidateNumInputsOutputs(layerParam, 1, 1); + + ConvolutionParameter convParam = layerParam.convolution_param(); + BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo()); + + unsigned int kernelH = 0; + unsigned int kernelW = 0; + if (convParam.has_kernel_h() && convParam.has_kernel_w()) + { + kernelH = convParam.kernel_h(); + kernelW = convParam.kernel_w(); + } + else if (convParam.kernel_size_size() > 0) + { + kernelH = (convParam.kernel_size()).Get(0); + kernelW = (convParam.kernel_size()).Get(0); + } + else + { + throw ParseException("Loading Convolution Layer: Kernel Size defined Illegally"); + } + + if (!IsInRange(kernelH, 0, 11) || !IsInRange(kernelW, 0, 11) || (kernelH != kernelW)) + { + throw ParseException("Loading Convolution Layer: Kernel has invalid size"); + } + + unsigned int strideH = 0; + unsigned int strideW = 0; + + if (convParam.has_stride_h() && convParam.has_stride_w()) + { + strideH = convParam.stride_h(); + strideW = convParam.stride_w(); + } + else if (convParam.stride_size() > 0) + { + strideH = (convParam.stride()).Get(0); + strideW = (convParam.stride()).Get(0); + } + else + { + // Caffe stride default is 1 + strideH = strideW = 1; + } + + if (!IsInRange(strideH, 0, 11) || !IsInRange(strideW, 0, 11) || (strideH != strideW)) + { + throw ParseException("Loading Convolution Layer: stride has invalid size"); + } + + unsigned int padH = 0; + unsigned int padW = 0; + + if (convParam.has_pad_h() && convParam.has_pad_w()) + { + padH = convParam.pad_h(); + padW = convParam.pad_w(); + } + else if (convParam.pad_size() > 0) + { + padH = (convParam.pad()).Get(0); + padW = (convParam.pad()).Get(0); + } + else + { + padH = 0; + padW = 0; + } + + if (!IsInRange(padH, 0, 11) || !IsInRange(padW, 0, 11) || (padH != padW)) + { + throw ParseException("Loading Convolution Layer: pad has invalid size"); + } + + // Handle grouping + const unsigned int numGroups = convParam.has_group() ? 
convParam.group() : 1; + armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)); + + vector<string> convLayerNames(numGroups); + vector<armnn::IConnectableLayer*> convLayers(numGroups); + convLayerNames[0] = layerParam.name(); + + armnn::IConnectableLayer* splitterLayer = nullptr; + if (numGroups > 1) + { + // This convolution is to be applied to chunks of the input data so add a splitter layer + + // Redirect the convolution input to the splitter + unsigned int splitterDimSizes[4] = {static_cast<unsigned int>(inputShape.dim(0)), + static_cast<unsigned int>(inputShape.dim(1)), + static_cast<unsigned int>(inputShape.dim(2)), + static_cast<unsigned int>(inputShape.dim(3))}; + + // Split dimension 1 of the splitter output shape and conv input shapes + // according to the number of groups + splitterDimSizes[1] /= numGroups; + inputShape.set_dim(1, splitterDimSizes[1]); + + // This is used to describe how the input is to be split + ViewsDescriptor splitterDesc(numGroups); + + // Create an output node for each group, giving each a unique name + for (unsigned int g = 0; g < numGroups; ++g) + { + // Work out the names of the splitter layers child convolutions + stringstream ss; + ss << layerParam.name() << "_" << g; + convLayerNames[g] = ss.str(); + + splitterDesc.SetViewOriginCoord(g, 1, splitterDimSizes[1] * g); + + // Set the size of the views. + for (unsigned int dimIdx=0; dimIdx < 4; dimIdx++) + { + splitterDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]); + } + } + + const std::string splitterLayerName = std::string("splitter_") + layerParam.bottom(0); + + // Add the splitter layer + splitterLayer = m_Network->AddSplitterLayer(splitterDesc, + splitterLayerName.c_str()); + + inputConnection.Connect(splitterLayer->GetInputSlot(0)); + for (unsigned int i = 0; i < splitterLayer->GetNumOutputSlots(); i++) + { + splitterLayer->GetOutputSlot(i).SetTensorInfo(BlobShapeToTensorInfo(inputShape)); + } + } + + // Ignored Caffe Parameters + // * Dilation Size + // * Weight Filler + // * Bias Filler + // * Engine + // * Force nd_im2col + // * Axis + + // Not Available ArmNN Interface Parameters + // * Rounding policy; + + Convolution2dDescriptor convolution2dDescriptor; + convolution2dDescriptor.m_PadLeft = padW; + convolution2dDescriptor.m_PadRight = padW; + convolution2dDescriptor.m_PadTop = padH; + convolution2dDescriptor.m_PadBottom = padH; + convolution2dDescriptor.m_StrideX = strideW; + convolution2dDescriptor.m_StrideY = strideH; + + unsigned int numFilters = convParam.num_output(); + + // Populate convolution output tensor descriptor dimensions + BlobShape outputShape; + outputShape.add_dim(0); + outputShape.set_dim(0, inputShape.dim(0)); + outputShape.add_dim(1); + // Ensure that dimension 1 of the convolution output is split according to the number of groups. 
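+    // The spatial dimensions (dims 2 and 3) set below follow the usual convolution arithmetic,
+    // out = (in + 2 * pad - kernel) / stride + 1, with the division truncated towards zero.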
+ outputShape.set_dim(1, numFilters / numGroups); + outputShape.add_dim(2); + outputShape.set_dim( + 2, (static_cast<int>(static_cast<float>(inputShape.dim(2) + 2 * padH - kernelH) / + boost::numeric_cast<float>(strideH)) + 1)); + outputShape.add_dim(3); + outputShape.set_dim( + 3, (static_cast<int>(static_cast<float>(inputShape.dim(3) + 2 * padW - kernelW) / + boost::numeric_cast<float>(strideW)) + 1)); + + // Load the weight data for ALL groups + vector<float> weightData(boost::numeric_cast<size_t>(numGroups * inputShape.dim(1) * outputShape.dim(1) * + kernelH * kernelW)); + GetDataFromBlob(layerParam, weightData, 0); + + const unsigned int weightDimSizes[4] = { + static_cast<unsigned int>(outputShape.dim(1)), static_cast<unsigned int>(inputShape.dim(1)), kernelH, kernelW}; + + // Bias data - This defaults to true in Caffe + TensorInfo biasInfo; + vector<float> biasData; + convolution2dDescriptor.m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() : true; + if (convolution2dDescriptor.m_BiasEnabled) + { + biasData.resize(boost::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f); + GetDataFromBlob(layerParam, biasData, 1); + + const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))}; + biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32); + } + + const unsigned int numWeightsPerGroup = boost::numeric_cast<unsigned int>(weightData.size()) / numGroups; + const unsigned int numBiasesPerGroup = boost::numeric_cast<unsigned int>(biasData.size()) / numGroups; + + armnn::IConnectableLayer* returnLayer = nullptr; + + for (unsigned int g = 0; g < numGroups; ++g) + { + // set the slot index, group 0 should be connected to the 0th output of the splitter + // group 1 should be connected to the 1st output of the splitter + + // Pull out the weights for this group from that loaded from the model file earlier + ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32), + weightData.data() + numWeightsPerGroup * g); + + IConnectableLayer* convLayer = nullptr; + if (convolution2dDescriptor.m_BiasEnabled) + { + // Pull out the biases for this group from that loaded from the model file earlier + ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g); + + convLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor, + weights, biases, convLayerNames[g].c_str()); + } + else + { + convLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor, + weights, convLayerNames[g].c_str()); + } + convLayers[g] = convLayer; + + // If we have more than one group then the input to the nth convolution the splitter layer's nth output, + // otherwise it's the regular input to this layer. + armnn::IOutputSlot& splitterInputConnection = splitterLayer ? 
splitterLayer->GetOutputSlot(g) : inputConnection; + splitterInputConnection.Connect(convLayer->GetInputSlot(0)); + convLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape)); + + returnLayer = convLayer; + } + + if (numGroups > 1) + { + // If the convolution was performed in chunks, add a layer to merge the results + + // The merge input shape matches that of the convolution output + unsigned int mergeDimSizes[4] = {static_cast<unsigned int>(outputShape.dim(0)), + static_cast<unsigned int>(outputShape.dim(1)), + static_cast<unsigned int>(outputShape.dim(2)), + static_cast<unsigned int>(outputShape.dim(3))}; + + // This is used to describe how the input is to be merged + OriginsDescriptor mergeDesc(numGroups); + + // Now create an input node for each group, using the name from + // the output of the corresponding convolution + for (unsigned int g = 0; g < numGroups; ++g) + { + mergeDesc.SetViewOriginCoord(g, 1, mergeDimSizes[1] * g); + } + + // Make sure the output from the merge is the correct size to hold the data for all groups + mergeDimSizes[1] *= numGroups; + outputShape.set_dim(1, mergeDimSizes[1]); + + // The merge layer just assumes the name of the original convolution + // layer so the following layer connection "just works" + const string mergeOutputName = layerParam.name(); + + // Finally add the merge layer + IConnectableLayer* layer = m_Network->AddMergerLayer(mergeDesc, mergeOutputName.c_str()); + + for (unsigned int g = 0; g < numGroups; ++g) + { + convLayers[g]->GetOutputSlot(0).Connect(layer->GetInputSlot(g)); + } + layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(4, mergeDimSizes, DataType::Float32)); + + returnLayer = layer; + } + + BOOST_ASSERT(returnLayer); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParsePoolingLayer(const LayerParameter& layerParam) +{ + ValidateNumInputsOutputs(layerParam, 1, 1); + + PoolingParameter param = layerParam.pooling_param(); + + const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo(); + + // Kernel size + unsigned int kernel_h = 0; + unsigned int kernel_w = 0; + if (param.has_kernel_h() && param.has_kernel_w()) + { + kernel_h = param.kernel_h(); + kernel_w = param.kernel_w(); + } + else if (param.kernel_size() > 0) + { + kernel_h = param.kernel_size(); + kernel_w = param.kernel_size(); + } + else if (param.has_global_pooling()) + { + kernel_h = inputInfo.GetShape()[2]; + kernel_w = inputInfo.GetShape()[3]; + } + else + { + throw ParseException("Loading Pooling Layer: Kernel Size defined Illegally"); + } + + if (!IsInRange(kernel_h, 0, 11) || !IsInRange(kernel_w, 0, 11) || (kernel_h != kernel_w)) + { + throw ParseException(boost::str( + boost::format("Loading Pooling Layer: kernel has invalid size: %1% x %2%") % kernel_h % kernel_w)); + } + + // Strides + // Default to a valid value for the case of global pooling (where the strides don't have to be explicitly set) + unsigned int stride_h = 1; + unsigned int stride_w = 1; + if (param.has_stride_h() && param.has_stride_w()) + { + stride_h = param.stride_h(); + stride_w = param.stride_w(); + } + else if (param.has_stride()) + { + stride_h = param.stride(); + stride_w = param.stride(); + } + else if (!param.has_global_pooling()) + { + throw ParseException("Loading Pooling Layer: Stride Size defined Illegally"); + } + + if (!IsInRange(stride_h, 0, 11) || !IsInRange(stride_w, 0, 11) || (stride_h != stride_w)) + { + throw ParseException("Loading Pooling Layer: stride 
has invalid size"); + } + + // Padding + unsigned int pad_h = 0; + unsigned int pad_w = 0; + if (param.has_pad_h() && param.has_pad_w()) + { + pad_h = param.pad_h(); + pad_w = param.pad_w(); + } + else if (param.has_pad()) + { + pad_h = param.pad(); + pad_w = param.pad(); + } + else + { + pad_h = 0; + pad_w = 0; + } + + if (!IsInRange(pad_h, 0, 11) || !IsInRange(pad_w, 0, 11) || (pad_h != pad_w)) + { + throw ParseException("Loading Pooling Layer: pad has invalid size"); + } + + // Ignored Caffe Parameters + // Stochastic Pooling + // Engine + + // Populate Weight and Bias Filter Descriptor + Pooling2dDescriptor pooling2dDescriptor; + if (param.has_pool()) + { + PoolingParameter_PoolMethod p = param.pool(); + switch (p) + { + case PoolingParameter_PoolMethod_MAX: + { + pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Max; + break; + } + case PoolingParameter_PoolMethod_AVE: + { + pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average; + break; + } + case PoolingParameter_PoolMethod_STOCHASTIC: + { + throw ParseException("Loading Pooling Layer: Stochastic Pooling Not Supported"); + } + default: + { + throw ParseException("Loading Pooling Layer: Mode Not Supported"); + } + } + } + else + { + throw ParseException("Loading Pooling Layer: No Pooling Method Defined"); + } + + pooling2dDescriptor.m_PadLeft = pad_w; + pooling2dDescriptor.m_PadRight = pad_w; + pooling2dDescriptor.m_PadTop = pad_h; + pooling2dDescriptor.m_PadBottom = pad_h; + pooling2dDescriptor.m_StrideX = stride_w; + pooling2dDescriptor.m_StrideY = stride_h; + pooling2dDescriptor.m_PoolWidth = kernel_w; + pooling2dDescriptor.m_PoolHeight = kernel_h; + + pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Ceiling; + pooling2dDescriptor.m_PaddingMethod = PaddingMethod::IgnoreValue; + + armnn::IConnectableLayer* poolingLayer = m_Network->AddPooling2dLayer(pooling2dDescriptor, + layerParam.name().c_str()); + + + TensorInfo outputInfo( + { inputInfo.GetShape()[0], + inputInfo.GetShape()[1], + static_cast<unsigned int>(ceil( + static_cast<float>(inputInfo.GetShape()[2] + 2 * pad_h - kernel_h) / + boost::numeric_cast<float>(stride_h))) + 1, + static_cast<unsigned int>(ceil( + static_cast<float>(inputInfo.GetShape()[3] + 2 * pad_w - kernel_w) / + boost::numeric_cast<float>(stride_w))) + 1 }, + DataType::Float32); + + GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(poolingLayer->GetInputSlot(0)); + poolingLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), poolingLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseReluLayer(const LayerParameter& layerParam) +{ + ValidateNumInputsOutputs(layerParam, 1, 1); + + const string& name = layerParam.name(); + const ReLUParameter& param = layerParam.relu_param(); + + ActivationDescriptor activationDescriptor; + const float negativeSlope = param.negative_slope(); + if (negativeSlope == 0.0f) + { + activationDescriptor.m_Function = ActivationFunction::ReLu; + } + else + { + activationDescriptor.m_Function = ActivationFunction::LeakyReLu; + activationDescriptor.m_A = negativeSlope; + } + + const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo(); + IConnectableLayer* const activationLayer = m_Network->AddActivationLayer(activationDescriptor, name.c_str()); + GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(activationLayer->GetInputSlot(0)); + activationLayer->GetOutputSlot(0).SetTensorInfo(inputInfo); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), 
activationLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseLRNLayer(const LayerParameter& layerParam) +{ + ValidateNumInputsOutputs(layerParam, 1, 1); + + LRNParameter param = layerParam.lrn_param(); + + const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo(); + + // Ignored BATCH NORMALIZATION Caffe Parameters + // Ignored MVN Caffe Parameters + // Ignored LRN Caffe Parameters + // Engine + + NormalizationDescriptor normalizationDescriptor; + if (param.has_norm_region()) + { + LRNParameter_NormRegion n = param.norm_region(); + switch (n) + { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + { + normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across; + break; + } + case LRNParameter_NormRegion_WITHIN_CHANNEL: + { + normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Within; + break; + } + default: + throw ParseException("Loading LRN Layer: Mode Not Supported"); + } + } + else + { + // Caffe defaults to normalization across channels + normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across; + } + + normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness; + if (param.has_local_size()) + { + normalizationDescriptor.m_NormSize = param.local_size(); + } + else + { + throw ParseException("Loading LRN Layer: Local_size not defined"); + } + + if (param.has_alpha()) + { + normalizationDescriptor.m_Alpha = param.alpha(); + normalizationDescriptor.m_Alpha /= boost::numeric_cast<float>(param.local_size()); + } + else + { + throw ParseException("Loading LRN Layer: Alpha not defined"); + } + if (param.has_beta()) + { + normalizationDescriptor.m_Beta = param.beta(); + } + else + { + throw ParseException("Loading LRN Layer: Beta not defined"); + } + if (param.has_k()) + { + normalizationDescriptor.m_K = param.k(); + } + else + normalizationDescriptor.m_K = 1; + + IConnectableLayer* const normLayer = m_Network->AddNormalizationLayer(normalizationDescriptor, + layerParam.name().c_str()); + GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(normLayer->GetInputSlot(0)); + normLayer->GetOutputSlot(0).SetTensorInfo(inputInfo); + + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), normLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseInnerProductLayer(const LayerParameter& layerParam) +{ + InnerProductParameter param = layerParam.inner_product_param(); + + ValidateNumInputsOutputs(layerParam, 1, 1); + + unsigned int outputSize = param.num_output(); + + // Ignored Caffe Parameters + // Weight Filler + // Bias Filler + // Engine + // Axis + + FullyConnectedDescriptor tensorFullyConnectedDescriptor; + + if (param.has_transpose()) + { + // If true assume transposed weights + tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = param.transpose(); + } + else + { + // caffe defaults to transposed + tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = true; + } + + const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo(); + + TensorInfo weightInfo; + TensorInfo biasInfo; + + // allow implicit flattening of extra dimensions + unsigned int inputSize = inputInfo.GetShape()[1]; + for (unsigned int i = 2; i < inputInfo.GetNumDimensions(); ++i) + { + inputSize *= inputInfo.GetShape()[i]; + } + + vector<float> weightData(inputSize * outputSize); + + GetDataFromBlob(layerParam, weightData, 0); + const unsigned int swTD[2] = { outputSize, inputSize }; + ConstTensor weights(TensorInfo(2, swTD, DataType::Float32), 
weightData); + + tensorFullyConnectedDescriptor.m_BiasEnabled = true; + // Todo: check whether bias enabled + armnn::IConnectableLayer* fullyConnectedLayer = nullptr; + if (tensorFullyConnectedDescriptor.m_BiasEnabled) + { + // BIAS VALUE + vector<float> biasData(outputSize); + + GetDataFromBlob(layerParam, biasData, 1); + + const unsigned int sbTD[1] = { outputSize }; + + ConstTensor biases(TensorInfo(1, sbTD, DataType::Float32), biasData); + + fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor, weights, biases, + layerParam.name().c_str()); + } + else + { + fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor, weights, + layerParam.name().c_str()); + } + + TensorInfo outputInfo({ inputInfo.GetShape()[0], outputSize }, DataType::Float32); + GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(fullyConnectedLayer->GetInputSlot(0)); + fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), fullyConnectedLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseSoftmaxLayer(const LayerParameter& layerParam) +{ + ValidateNumInputsOutputs(layerParam, 1, 1); + + SoftmaxParameter param = layerParam.softmax_param(); + + const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo(); + + // Ignored Caffe Parameters + // axis + // Engine + + armnn::SoftmaxDescriptor softmaxDescriptor; + armnn::IConnectableLayer* const softmaxLayer = m_Network->AddSoftmaxLayer( + softmaxDescriptor, + layerParam.name().c_str()); + GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(softmaxLayer->GetInputSlot(0)); + softmaxLayer->GetOutputSlot(0).SetTensorInfo(inputInfo); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), softmaxLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseEltwiseLayer(const LayerParameter& layerParam) +{ + ValidateNumInputsOutputs(layerParam, 2, 1); + + const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo(); + + // Ignored Caffe Parameters + // coeff + + EltwiseParameter_EltwiseOp operation = EltwiseParameter_EltwiseOp_SUM; // default to sum as per caffe + + if (layerParam.has_eltwise_param() && layerParam.eltwise_param().has_operation()) + { + operation = layerParam.eltwise_param().operation(); + } + + armnn::IConnectableLayer* newLayer = nullptr; + switch (operation) + { + case EltwiseParameter_EltwiseOp_SUM: + { + newLayer = m_Network->AddAdditionLayer(layerParam.name().c_str()); + break; + } + case EltwiseParameter_EltwiseOp_PROD: + { + newLayer = m_Network->AddMultiplicationLayer(layerParam.name().c_str()); + break; + } + default: + { + throw ParseException("Unsupported operation in Eltwise layer"); + } + } + + GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(newLayer->GetInputSlot(0)); + GetArmnnOutputSlotForCaffeTop(layerParam.bottom(1)).Connect(newLayer->GetInputSlot(1)); + newLayer->GetOutputSlot(0).SetTensorInfo(inputInfo); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), newLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseConcatLayer(const LayerParameter& layerParam) +{ + unsigned int numInputs = static_cast<unsigned int>(layerParam.bottom_size()); + // we assume concat happens along the channel dimension, which is 1 in (0, 1, 2, 3) + unsigned int concatDim = 1; + unsigned int numOfDims = 4; + + OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numInputs), numOfDims);// we only consider 4-D tensor here + std::vector<unsigned 
int>mergeDimSizes(numOfDims, 0u); + + unsigned int mergeDim = 0; + for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex) + { + const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop( + layerParam.bottom(boost::numeric_cast<int>(viewIndex))).GetTensorInfo(); + // Check whether the dimensions of the input tensors are actually 4 + if (inputInfo.GetNumDimensions()!=4) + { + throw ParseException("The number of dimensions for input tensors of the concatenation op should be 4."); + } + + mergeDimSizes[0] = inputInfo.GetShape()[0]; + mergeDimSizes[1] = inputInfo.GetShape()[1]; + mergeDimSizes[2] = inputInfo.GetShape()[2]; + mergeDimSizes[3] = inputInfo.GetShape()[3]; + + for (unsigned int j = 0; j < concatDim; ++j) + { + concatDescriptor.SetViewOriginCoord(viewIndex, j, 0); + } + + concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim); + mergeDim += mergeDimSizes[concatDim]; + + for (unsigned int j = concatDim+1; j < numOfDims; ++j) + { + concatDescriptor.SetViewOriginCoord(viewIndex, j, 0); + } + } + mergeDimSizes[concatDim] = mergeDim; + + armnn::IConnectableLayer *concatlayer = m_Network->AddMergerLayer(concatDescriptor, layerParam.name().c_str()); + for (unsigned int i = 0; i < numInputs; ++i) + { + armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(boost::numeric_cast<int>(i))); + outputSlot.Connect(concatlayer->GetInputSlot(i)); + } + + concatlayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(numOfDims, mergeDimSizes.data(), DataType::Float32)); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatlayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseBatchNormLayer(const LayerParameter& layerParam) +{ + ValidateNumInputsOutputs(layerParam, 1, 1); + + const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo(); + + string name = layerParam.name(); + + BatchNormParameter param = layerParam.batch_norm_param(); + // If use_global_stats is not explicitly set in the model, assume it to be true (its default value + // when the network is in the testing phase). 
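+    // In the testing phase Caffe's batch norm reduces to y = (x - mean) / sqrt(variance + eps),
+    // which is why it can be mapped onto an ArmNN batch normalization layer with an identity
+    // scale (gamma = 1, beta = 0) further down.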
+ if (param.has_use_global_stats()) + { + if (!param.use_global_stats()) + { + throw ParseException(boost::str(boost::format("Error parsing Batch Norm layer '%1%': " + "Parameter 'use_global_stats' is set to false, which is unsupported (value used for training).") + % name)); + } + } + + BatchNormalizationDescriptor desc; + desc.m_Eps = param.eps(); + + unsigned int channels = inputInfo.GetShape()[1]; + unsigned int shape[] = {channels}; + + vector<float> meanData(channels); + GetDataFromBlob(layerParam, meanData, 0); + + vector<float> varianceData(channels); + GetDataFromBlob(layerParam, varianceData, 1); + + // identity scale operation + vector<float> betaData(channels, 0.0f); + vector<float> gammaData(channels, 1.0f); + + ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData); + ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData); + ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData); + ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData); + + armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc, + mean, variance, beta, gamma, name.c_str()); + GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0)); + batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseScaleLayer(const LayerParameter& layerParam) +{ + // current unoptimal solution: add a batchnormalization layer with 0 mean and 1 variance + ValidateNumInputsOutputs(layerParam, 1, 1); + + const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo(); + + string name = layerParam.name(); + + ScaleParameter param = layerParam.scale_param(); + if (param.axis() != 1) + { + // Would have to use something other than BatchNormalizationLayer in this case + throw ParseException("Loading Scale Layer: Only axis 1 supported currently"); + } + + unsigned int channels = inputInfo.GetShape()[1]; + unsigned int shape[] = {channels}; + + BatchNormalizationDescriptor desc; + desc.m_Eps = 0.0f; // don't need epsilon if variance is 1 + vector<float> meanData(channels, 0.0f); + vector<float> varianceData(channels, 1.0f); + vector<float> betaData(channels, 0.0f); + vector<float> gammaData(channels); + + GetDataFromBlob(layerParam, gammaData, 0); + + if(param.has_bias_term()) + { + GetDataFromBlob(layerParam, betaData, 1); + } + + ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData); + ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData); + ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData); + ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData); + + armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc, + mean, variance, beta, gamma, name.c_str()); + GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0)); + batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo); + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0)); +} + +void CaffeParser::ParseSplitLayer(const caffe::LayerParameter& layerParam) +{ + // Used in caffe to duplicate memory - not necessary in armnn + if (layerParam.bottom_size() != 1) + { + throw ParseException("Split layer '" + layerParam.name() + "' should have exactly 1 bottom"); + } + armnn::IOutputSlot& 
outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)); + for (int i = 0; i < layerParam.top_size(); i++) + { + SetArmnnOutputSlotForCaffeTop(layerParam.top(i), outputSlot); + } +} + +void CaffeParser::ParseDropoutLayer(const caffe::LayerParameter& layerParam) +{ + // Ignored for inference so patch the single input to its single output + if (layerParam.bottom_size() != 1 || layerParam.top_size() != 1) + { + throw ParseException("Dropout layer '" + layerParam.name() + "' should have exactly 1 bottom and 1 top"); + } + SetArmnnOutputSlotForCaffeTop(layerParam.top(0), GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0))); +} + +void CaffeParser::TrackInputBinding(armnn::IConnectableLayer* layer, + armnn::LayerBindingId id, + const armnn::TensorInfo& tensorInfo) +{ + return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkInputsBindingInfo); +} + +void CaffeParser::TrackOutputBinding(armnn::IConnectableLayer* layer, + armnn::LayerBindingId id, + const armnn::TensorInfo& tensorInfo) +{ + return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkOutputsBindingInfo); +} + +void CaffeParser::TrackBindingPoint(armnn::IConnectableLayer* layer, + armnn::LayerBindingId id, + const armnn::TensorInfo& tensorInfo, + const char* bindingPointDesc, + std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo) +{ + const std::string layerName = layer->GetName(); + auto it = nameToBindingInfo.find(layerName); + if (it == nameToBindingInfo.end()) + { + nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo); + } + else + { + throw ParseException(boost::str( + boost::format("Id %1% used by more than one %2% layer") % id % bindingPointDesc)); + } +} + +armnn::IOutputSlot& CaffeParser::GetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName) const +{ + auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName); + if (it != m_ArmnnOutputSlotForCaffeTop.end()) + { + return *it->second; + } + else + { + throw ParseException(boost::str(boost::format( + "Could not find armnn output slot for Caffe top '%1%'") % caffeTopName)); + } +} + +void CaffeParser::SetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName, armnn::IOutputSlot& armnnOutputSlot) +{ + auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName); + if (it == m_ArmnnOutputSlotForCaffeTop.end()) + { + m_ArmnnOutputSlotForCaffeTop[caffeTopName] = &armnnOutputSlot; + } + else + { + throw ParseException("Attempting to add duplicate entry for Caffe top '" + caffeTopName + "'"); + } +} + +void CaffeParser::ResolveInPlaceLayers(caffe::NetParameter& netParameter) +{ + // Find layers with the same top + std::map<std::string, std::vector<caffe::LayerParameter*>> layersByTop; + for (int layerIdx = 0; layerIdx < netParameter.layer_size(); ++layerIdx) + { + caffe::LayerParameter& layer = *netParameter.mutable_layer(layerIdx); + for (int i = 0; i < layer.top_size(); ++i) + { + layersByTop[layer.top(i)].push_back(&layer); + } + } + + // For each set of layers with the same top, resolve them to a linear chain rather than in-place layers. + // Note that for 'regular' layers, there will be a single layer in each group and so this will be a no-op. + for (auto layersWithSameTopIt : layersByTop) + { + const std::string& top = layersWithSameTopIt.first; + const std::vector<caffe::LayerParameter*>& layersWithSameTop = layersWithSameTopIt.second; + + // Chain the layers together in the order that they are listed in the prototxt (hopefully this is correct). 
+ // Note that the last layer will not have its top modified so that other layers will continue to reference it. + for (unsigned int layerIdx = 0; layerIdx < layersWithSameTop.size() - 1; ++layerIdx) + { + caffe::LayerParameter& layer1 = *layersWithSameTop[layerIdx]; + caffe::LayerParameter& layer2 = *layersWithSameTop[layerIdx+1]; + if (layer1.top_size() != 1) + { + throw ParseException("Node '" + layer1.name() + "' is an in-place layer but " + "doesn't have exactly one top."); + } + std::string newTop = layer1.name() + "_top"; + layer1.set_top(0, newTop); + if (layer2.bottom_size() != 1 || layer2.bottom(0) != top) + { + throw ParseException("Node '" + layer2.name() + "' is an in-place layer but " + " doesn't have exactly one bottom, or it doesn't match its top."); + } + layer2.set_bottom(0, newTop); + } + } +} + +void CaffeParser::LoadNetParam(NetParameter& netParameter) +{ + // caffe models sometimes have an implicit input layer. + // in that case, add an explicit one + if (netParameter.input_size() > 0) + { + LayerParameter* newLayer = netParameter.add_layer(); + + newLayer->set_type("Input"); + newLayer->set_name(netParameter.input(0)); + newLayer->add_top(netParameter.input(0)); + + InputParameter* inputParam = newLayer->mutable_input_param(); + BlobShape* shape = inputParam->add_shape(); + + int dim_size = netParameter.input_dim_size(); + for (int i = 0; i < dim_size; ++i) + { + shape->add_dim(netParameter.input_dim(i)); + } + } + + // Replace in-place layers with regular ones to make the rest of the parsing easier. + ResolveInPlaceLayers(netParameter); + + // Create a lookup of Caffe layers by name + for (int i = 0; i < netParameter.layer_size(); ++i) + { + const caffe::LayerParameter& layer = netParameter.layer(i); + for (int i = 0; i < layer.top_size(); ++i) + { + m_CaffeLayersByTopName[layer.top(i)] = &layer; + } + } + + // Find the output layers the user requested + std::vector<const caffe::LayerParameter*> targetLayers; + for (const std::string& requestedOutputName : m_RequestedOutputs) + { + auto nodeIt = m_CaffeLayersByTopName.find(requestedOutputName); + if (nodeIt == m_CaffeLayersByTopName.end()) + { + throw ParseException("Couldn't find requested output layer '" + requestedOutputName + "' in graph"); + } + targetLayers.push_back(nodeIt->second); + } + + // Sort them into a linear ordering such that all inputs of a node are before the node itself + std::vector<const caffe::LayerParameter*> sortedNodes; + if (!armnnUtils::GraphTopologicalSort<const caffe::LayerParameter*>( + targetLayers, + [this](const caffe::LayerParameter* node) + { + return GetInputs(*node); + }, + sortedNodes)) + { + throw ParseException("Cycle detected in graph"); + } + + // Parse each node in order, knowing that all inputs of a node will be processed before the node itself + for (const caffe::LayerParameter* current : sortedNodes) + { + auto it = ms_CaffeLayerNameToParsingFunctions.find(current->type()); + if (it == ms_CaffeLayerNameToParsingFunctions.end()) + { + throw ParseException("Unsupported layer type '" + current->type() + "'"); + } + auto func = it->second; + (this->*func)(*current); + } + + // Add ArmNN output layers connected to each requested output + for (const std::string& requestedOutput : m_RequestedOutputs) + { + armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput); + + const armnn::LayerBindingId outputId = boost::numeric_cast<armnn::LayerBindingId>( + m_NetworkOutputsBindingInfo.size()); + armnn::IConnectableLayer* const outputLayer = 
m_Network->AddOutputLayer(outputId, requestedOutput.c_str()); + outputSlot.Connect(outputLayer->GetInputSlot(0)); + + TrackOutputBinding(outputLayer, outputId, outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo()); + } +} + +INetworkPtr CaffeParser::CreateNetworkFromTextFile(const char* graphFile, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) +{ + FILE* fd = fopen(graphFile, "r"); + + if (fd == nullptr) + { + std::stringstream error; + error << "Graph file " << graphFile << " failed to open"; + throw FileNotFoundException(error.str()); + } + + // Parse the file into a message + NetParameter netParam; + auto input = new google::protobuf::io::FileInputStream(fileno(fd)); + bool success = google::protobuf::TextFormat::Parse(input, &netParam); + delete input; + fclose(fd); + + if (!success) + { + std::stringstream error; + error << "Failed to parse graph file"; + throw ParseException(error.str()); + } + + return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs); +} + +INetworkPtr CaffeParser::CreateNetworkFromString(const char* protoText, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) +{ + // Parse the string into a message + NetParameter netParam; + bool success = google::protobuf::TextFormat::ParseFromString(protoText, &netParam); + + if (!success) + { + std::stringstream error; + error << "Failed to parse graph string"; + throw ParseException(error.str()); + } + + return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs); +} + +INetworkPtr CaffeParser::CreateNetworkFromBinaryFile(const char* graphFile, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) +{ + FILE* fd = fopen(graphFile, "rb"); + + if (fd == nullptr) + { + std::stringstream error; + error << "Graph file " << graphFile << " failed to open"; + throw FileNotFoundException(error.str()); + } + + // Parse the file into a message + NetParameter netParam; + + FileInputStream inStream(fileno(fd)); + CodedInputStream codedStream(&inStream); + codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX); + bool success = netParam.ParseFromCodedStream(&codedStream); + fclose(fd); + + if (!success) + { + std::stringstream error; + error << "Failed to parse protobuf file" << graphFile; + throw ParseException(error.str()); + } + + return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs); +} + +INetworkPtr CaffeParser::CreateNetworkFromNetParameter(NetParameter& netParam, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) +{ + m_NetworkInputsBindingInfo.clear(); + m_NetworkOutputsBindingInfo.clear(); + + m_Network = INetwork::Create(); + + m_InputShapes = inputShapes; + if (requestedOutputs.size() == 0) + { + throw ParseException("requestedOutputs must have at least one entry"); + } + m_RequestedOutputs = requestedOutputs; + + try + { + LoadNetParam(netParam); + } + catch (const ParseException& e) + { + Cleanup(); + throw e; + } + + Cleanup(); + + return move(m_Network); +} + +void CaffeParser::Cleanup() +{ + // cleanup, in case we reuse this parser + m_CaffeLayersByTopName.clear(); + m_InputShapes.clear(); + m_RequestedOutputs.clear(); + m_ArmnnOutputSlotForCaffeTop.clear(); +} + +} + + diff --git a/src/armnnCaffeParser/CaffeParser.hpp b/src/armnnCaffeParser/CaffeParser.hpp new file mode 100644 index 
0000000000..0b31e187dd --- /dev/null +++ b/src/armnnCaffeParser/CaffeParser.hpp @@ -0,0 +1,142 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once +#include "armnnCaffeParser/ICaffeParser.hpp" + +#include "armnn/Types.hpp" +#include "armnn/NetworkFwd.hpp" +#include "armnn/Tensor.hpp" + +#include <memory> +#include <vector> +#include <unordered_map> + +namespace caffe +{ +class BlobShape; +class LayerParameter; +class NetParameter; +} + +namespace armnnCaffeParser +{ + +using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>; + +class CaffeParser : public ICaffeParser +{ +public: + /// Create the network from a protobuf text file on disk + virtual armnn::INetworkPtr CreateNetworkFromTextFile( + const char* graphFile, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) override; + + /// Create the network from a protobuf binary file on disk + virtual armnn::INetworkPtr CreateNetworkFromBinaryFile( + const char* graphFile, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) override; + + /// Create the network directly from protobuf text in a string. Useful for debugging/testing + virtual armnn::INetworkPtr CreateNetworkFromString( + const char* protoText, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) override; + + /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name + virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const override; + + /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name + virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const override; + +public: + CaffeParser(); + +private: + static std::pair<armnn::LayerBindingId, armnn::TensorInfo> GetBindingInfo(const std::string& layerName, + const char* bindingPointDesc, + const std::unordered_map<std::string, BindingPointInfo>& bindingInfos); + + /// Parses a NetParameter loaded into memory from one of the other CreateNetwork* + armnn::INetworkPtr CreateNetworkFromNetParameter( + caffe::NetParameter& netParam, + const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs); + + /// does the actual conversion from caffe::NetParameter to armnn::INetwork + void LoadNetParam(caffe::NetParameter& netParameter); + + /// Find the Caffe layers listed as inputs (bottoms) for a given layer. + std::vector<const caffe::LayerParameter*> GetInputs(const caffe::LayerParameter& layerParam); + + /// Modifies the Caffe network to replace "in-place" layers (whose top() and bottom() are both the same) + /// with regular layers. This simplifies further parsing. + void ResolveInPlaceLayers(caffe::NetParameter& netParameter); + + /// Converts Caffe's protobuf tensor shape format to ArmNN's + armnn::TensorInfo BlobShapeToTensorInfo(const caffe::BlobShape& blobShape) const; + + /// Adds an armnn layer to m_Network given a Caffe LayerParameter of the correct type + /// and is responsible for recording any newly created IOutputSlots using SetArmnnOutputSlotForCaffeTop(). 
+ /// @{ + void ParseInputLayer(const caffe::LayerParameter& layerParam); + void ParseConvLayer(const caffe::LayerParameter& layerParam); + void ParsePoolingLayer(const caffe::LayerParameter& layerParam); + void ParseReluLayer(const caffe::LayerParameter& layerParam); + void ParseLRNLayer(const caffe::LayerParameter& layerParam); + void ParseInnerProductLayer(const caffe::LayerParameter& layerParam); + void ParseSoftmaxLayer(const caffe::LayerParameter& layerParam); + void ParseEltwiseLayer(const caffe::LayerParameter& layerParam); + void ParseConcatLayer(const caffe::LayerParameter& layerParam); + void ParseBatchNormLayer(const caffe::LayerParameter& layerParam); + void ParseScaleLayer(const caffe::LayerParameter& layerParam); + void ParseSplitLayer(const caffe::LayerParameter& layerParam); + void ParseDropoutLayer(const caffe::LayerParameter& layerParam); + /// @} + + void TrackInputBinding(armnn::IConnectableLayer* layer, + armnn::LayerBindingId id, + const armnn::TensorInfo& tensorInfo); + + void TrackOutputBinding(armnn::IConnectableLayer* layer, + armnn::LayerBindingId id, + const armnn::TensorInfo& tensorInfo); + + static void TrackBindingPoint(armnn::IConnectableLayer* layer, armnn::LayerBindingId id, + const armnn::TensorInfo& tensorInfo, + const char* bindingPointDesc, + std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo); + + /// Retrieves the Armnn IOutputSlot representing the given Caffe top. + /// Throws if it cannot be found (e.g. not parsed yet). + armnn::IOutputSlot& GetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName) const; + + void SetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName, armnn::IOutputSlot& armnnOutputSlot); + + void Cleanup(); + + armnn::INetworkPtr m_Network; + + std::map<std::string, const caffe::LayerParameter*> m_CaffeLayersByTopName; + + using OperationParsingFunction = void(CaffeParser::*)(const caffe::LayerParameter& layerParam); + + /// map of Caffe layer names to parsing member functions + static const std::map<std::string, OperationParsingFunction> ms_CaffeLayerNameToParsingFunctions; + + std::map<std::string, armnn::TensorShape> m_InputShapes; + std::vector<std::string> m_RequestedOutputs; + + /// As we add armnn layers we store the armnn IOutputSlot which corresponds to the Caffe tops. + std::unordered_map<std::string, armnn::IOutputSlot*> m_ArmnnOutputSlotForCaffeTop; + + /// maps input layer names to their corresponding ids and tensor infos + std::unordered_map<std::string, BindingPointInfo> m_NetworkInputsBindingInfo; + + /// maps output layer names to their corresponding ids and tensor infos + std::unordered_map<std::string, BindingPointInfo> m_NetworkOutputsBindingInfo; +}; +}
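+
+// A minimal usage sketch of the parser interface declared above. The file name, layer names
+// and input shape are hypothetical placeholders, not taken from any particular model:
+//
+//     armnnCaffeParser::ICaffeParserPtr parser = armnnCaffeParser::ICaffeParser::Create();
+//     std::map<std::string, armnn::TensorShape> inputShapes{ { "data", armnn::TensorShape({ 1, 3, 224, 224 }) } };
+//     armnn::INetworkPtr network = parser->CreateNetworkFromTextFile("model.prototxt", inputShapes, { "prob" });
+//     auto inputBinding  = parser->GetNetworkInputBindingInfo("data");
+//     auto outputBinding = parser->GetNetworkOutputBindingInfo("prob");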
\ No newline at end of file diff --git a/src/armnnCaffeParser/test/TestAdd.cpp b/src/armnnCaffeParser/test/TestAdd.cpp new file mode 100644 index 0000000000..7d91924638 --- /dev/null +++ b/src/armnnCaffeParser/test/TestAdd.cpp @@ -0,0 +1,70 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(CaffeParser) + +struct AddFixture : public ParserPrototxtFixture<armnnCaffeParser::ICaffeParser> +{ + AddFixture() + { + m_Prototext = "name: \"MinimalAdd\"\n" + "layer {\n" + " name: \"data\"\n" + " type: \"Input\"\n" + " top: \"data\"\n" + " input_param { shape: { dim: 1 dim: 1 dim: 4 dim: 4 } }\n" + "}\n" + "layer {\n" + " bottom: \"data\"\n" + " top: \"pool1\"\n" + " name: \"pool1\"\n" + " type: \"Pooling\"\n" + " pooling_param {\n" + " kernel_size: 2\n" + " stride: 2\n" + " pool: MAX\n" + " }\n" + "}\n" + "layer {\n" + " bottom: \"data\"\n" + " top: \"pool2\"\n" + " name: \"pool2\"\n" + " type: \"Pooling\"\n" + " pooling_param {\n" + " kernel_size: 2\n" + " stride: 2\n" + " pool: MAX\n" + " }\n" + "}\n" + "layer {\n" + " bottom: \"pool1\"\n" + " bottom: \"pool2\"\n" + " top: \"add\"\n" + " name: \"add\"\n" + " type: \"Eltwise\"\n" + "}\n"; + SetupSingleInputSingleOutput("data", "add"); + } +}; + +BOOST_FIXTURE_TEST_CASE(ParseAdd, AddFixture) +{ + RunTest<4>( + { + 0, 1, 0, 0, + 0, 0, 0, 0, + 0, 0, 1, 0, + 1, 0, 1, 1 + }, + { + 2, 0, + 2, 2 + }); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnCaffeParser/test/TestConcat.cpp b/src/armnnCaffeParser/test/TestConcat.cpp new file mode 100644 index 0000000000..441c28c837 --- /dev/null +++ b/src/armnnCaffeParser/test/TestConcat.cpp @@ -0,0 +1,73 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(CaffeParser) + +struct ConcatFixture : public ParserPrototxtFixture<armnnCaffeParser::ICaffeParser> +{ + ConcatFixture() + { + m_Prototext = "name: \"Concat\"\n" + "layer {\n" + " name: \"data\"\n" + " type: \"Input\"\n" + " top: \"data\"\n" + " input_param { shape: { dim: 1 dim: 1 dim: 4 dim: 4 } }\n" + "}\n" + "layer {\n" + " bottom: \"data\"\n" + " top: \"pool1\"\n" + " name: \"pool1\"\n" + " type: \"Pooling\"\n" + " pooling_param {\n" + " kernel_size: 2\n" + " stride: 2\n" + " pool: MAX\n" + " }\n" + "}\n" + "layer {\n" + " bottom: \"data\"\n" + " top: \"pool2\"\n" + " name: \"pool2\"\n" + " type: \"Pooling\"\n" + " pooling_param {\n" + " kernel_size: 2\n" + " stride: 2\n" + " pool: MAX\n" + " }\n" + "}\n" + "layer {\n" + " bottom: \"pool1\"\n" + " bottom: \"pool2\"\n" + " top: \"concat\"\n" + " name: \"concat\"\n" + " type: \"Concat\"\n" + "}\n"; + SetupSingleInputSingleOutput("data", "concat"); + } +}; + +BOOST_FIXTURE_TEST_CASE(ParseConcat, ConcatFixture) +{ + RunTest<4>( + { + 0, 1, 0, 0, + 0, 0, 0, 0, + 0, 0, 1, 0, + 1, 0, 1, 1 + }, + { + 1, 0, + 1, 1, + + 1, 0, + 1, 1 + }); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnCaffeParser/test/TestDropout.cpp b/src/armnnCaffeParser/test/TestDropout.cpp new file mode 100644 index 0000000000..16f2c2728c --- /dev/null +++ b/src/armnnCaffeParser/test/TestDropout.cpp @@ -0,0 +1,53 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// See LICENSE file in the project root for full license information. +// + +#include <boost/test/unit_test.hpp> +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(CaffeParser) + +struct DropoutFixture : public ParserPrototxtFixture<armnnCaffeParser::ICaffeParser> +{ + DropoutFixture() + { + m_Prototext = "name: \"DropoutFixture\"\n" + "layer {\n" + " name: \"data\"\n" + " type: \"Input\"\n" + " top: \"data\"\n" + " input_param { shape: { dim: 1 dim: 1 dim: 2 dim: 2 } }\n" + "}\n" + "layer {\n" + " bottom: \"data\"\n" + " top: \"drop1\"\n" + " name: \"drop1\"\n" + " type: \"Dropout\"\n" + "}\n" + "layer {\n" + " bottom: \"drop1\"\n" + " bottom: \"drop1\"\n" + " top: \"add\"\n" + " name: \"add\"\n" + " type: \"Eltwise\"\n" + "}\n"; + SetupSingleInputSingleOutput("data", "add"); + } +}; + +BOOST_FIXTURE_TEST_CASE(ParseDropout, DropoutFixture) +{ + RunTest<4>( + { + 1, 2, + 3, 4, + }, + { + 2, 4, + 6, 8 + }); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnCaffeParser/test/TestInPlace.cpp b/src/armnnCaffeParser/test/TestInPlace.cpp new file mode 100644 index 0000000000..3954baa75b --- /dev/null +++ b/src/armnnCaffeParser/test/TestInPlace.cpp @@ -0,0 +1,98 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(CaffeParser) + +// The pooling layer should take its input from the relu, not the add directly. +struct InPlaceFixture : public ParserPrototxtFixture<armnnCaffeParser::ICaffeParser> +{ + InPlaceFixture() + { + m_Prototext = R"( +name: "InPlace" +layer { + name: "data" + type: "Input" + top: "data" + input_param { shape: { dim: 1 dim: 1 dim: 1 dim: 1 } } +} +layer { + bottom: "data" + bottom: "data" + top: "add" + name: "add" + type: "Eltwise" +} +layer { + name: "relu" + type: "ReLU" + bottom: "add" + top: "relu" + phase: TEST +} +layer { + name: "pool" + type: "Pooling" + bottom: "relu" + top: "pool" + phase: TEST + pooling_param { + pool: MAX + kernel_size: 1 + stride: 1 + } +} + )"; + SetupSingleInputSingleOutput("data", "pool"); + } +}; + +BOOST_FIXTURE_TEST_CASE(ParseInPlace, InPlaceFixture) +{ + RunTest<1>({ -1.0f }, { 0.0f }); +} + +// The requested output of the network is a layer which has an activation attached. +// The output of the network should therefore actually be the activation layer. +struct InPlaceOutputFixture : public ParserPrototxtFixture<armnnCaffeParser::ICaffeParser> +{ + InPlaceOutputFixture() + { + m_Prototext = R"( +name: "InPlace" +layer { + name: "data" + type: "Input" + top: "data" + input_param { shape: { dim: 1 dim: 1 dim: 1 dim: 1 } } +} +layer { + bottom: "data" + bottom: "data" + top: "add" + name: "add" + type: "Eltwise" +} +layer { + name: "relu" + type: "ReLU" + bottom: "add" + top: "add" + phase: TEST +} + )"; + SetupSingleInputSingleOutput("data", "add"); + } +}; + +BOOST_FIXTURE_TEST_CASE(InPlaceOutput, InPlaceOutputFixture) +{ + RunTest<1>({ -1.0f }, { 0.0f }); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnCaffeParser/test/TestInputs.cpp b/src/armnnCaffeParser/test/TestInputs.cpp new file mode 100644 index 0000000000..f0e2343a33 --- /dev/null +++ b/src/armnnCaffeParser/test/TestInputs.cpp @@ -0,0 +1,120 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include <boost/test/unit_test.hpp> +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "armnn/IRuntime.hpp" +#include "armnn/INetwork.hpp" +#include "armnn/Exceptions.hpp" + +#include "test/TensorHelpers.hpp" + +#include <string> + +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(CaffeParser) + + +BOOST_AUTO_TEST_CASE(InputShapes) +{ + std::string explicitInput = "name: \"Minimal\"\n" + "layer {\n" + " name: \"data\"\n" + " type: \"Input\"\n" + " top: \"data\"\n" + " input_param { shape: { dim: 1 dim: 2 dim: 3 dim: 4 } }\n" + "}"; + std::string implicitInput = "name: \"Minimal\"\n" + "input: \"data\" \n" + "input_dim: 1 \n" + "input_dim: 2 \n" + "input_dim: 3 \n" + "input_dim: 4 \n"; + std::string implicitInputNoShape = "name: \"Minimal\"\n" + "input: \"data\" \n"; + + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(armnn::Compute::CpuRef)); + armnnCaffeParser::ICaffeParserPtr parser(armnnCaffeParser::ICaffeParser::Create()); + armnn::INetworkPtr network(nullptr, nullptr); + armnn::NetworkId netId; + + // Check everything works normally + { + network = parser->CreateNetworkFromString(explicitInput.c_str(), {}, { "data" }); + BOOST_TEST(network.get()); + runtime->LoadNetwork(netId, Optimize(*network, runtime->GetDeviceSpec())); + + armnnCaffeParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo("data"); + armnn::TensorInfo inputTensorInfo = inputBindingInfo.second; + BOOST_TEST((inputTensorInfo == runtime->GetInputTensorInfo(netId, inputBindingInfo.first))); + + BOOST_TEST(inputTensorInfo.GetShape()[0] == 1); + BOOST_TEST(inputTensorInfo.GetShape()[1] == 2); + BOOST_TEST(inputTensorInfo.GetShape()[2] == 3); + BOOST_TEST(inputTensorInfo.GetShape()[3] == 4); + } + + // Check everything works with implicit input + { + network = parser->CreateNetworkFromString(implicitInput.c_str(), {}, { "data" }); + BOOST_TEST(network.get()); + runtime->LoadNetwork(netId, Optimize(*network, runtime->GetDeviceSpec())); + + armnnCaffeParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo("data"); + armnn::TensorInfo inputTensorInfo = inputBindingInfo.second; + BOOST_TEST((inputTensorInfo == runtime->GetInputTensorInfo(netId, inputBindingInfo.first))); + + BOOST_TEST(inputTensorInfo.GetShape()[0] == 1); + BOOST_TEST(inputTensorInfo.GetShape()[1] == 2); + BOOST_TEST(inputTensorInfo.GetShape()[2] == 3); + BOOST_TEST(inputTensorInfo.GetShape()[3] == 4); + } + + // Check everything works with implicit and passing shape + { + network = parser->CreateNetworkFromString(implicitInput.c_str(), { {"data", { 2, 2, 3, 4 } } }, { "data" }); + BOOST_TEST(network.get()); + runtime->LoadNetwork(netId, Optimize(*network, runtime->GetDeviceSpec())); + + armnnCaffeParser::BindingPointInfo inputBindingInfo = parser->GetNetworkInputBindingInfo("data"); + armnn::TensorInfo inputTensorInfo = inputBindingInfo.second; + BOOST_TEST((inputTensorInfo == runtime->GetInputTensorInfo(netId, inputBindingInfo.first))); + + BOOST_TEST(inputTensorInfo.GetShape()[0] == 2); + BOOST_TEST(inputTensorInfo.GetShape()[1] == 2); + BOOST_TEST(inputTensorInfo.GetShape()[2] == 3); + BOOST_TEST(inputTensorInfo.GetShape()[3] == 4); + } + + // Check everything works with implicit (no shape) and passing shape + { + network = parser->CreateNetworkFromString(implicitInputNoShape.c_str(), {{"data", {2, 2, 3, 4} }}, { "data" }); + BOOST_TEST(network.get()); + runtime->LoadNetwork(netId, Optimize(*network, runtime->GetDeviceSpec())); + + armnnCaffeParser::BindingPointInfo inputBindingInfo = 
parser->GetNetworkInputBindingInfo("data"); + armnn::TensorInfo inputTensorInfo = inputBindingInfo.second; + BOOST_TEST((inputTensorInfo == runtime->GetInputTensorInfo(netId, inputBindingInfo.first))); + + BOOST_TEST(inputTensorInfo.GetShape()[0] == 2); + BOOST_TEST(inputTensorInfo.GetShape()[1] == 2); + BOOST_TEST(inputTensorInfo.GetShape()[2] == 3); + BOOST_TEST(inputTensorInfo.GetShape()[3] == 4); + } + + // Check exception on incompatible shapes + { + BOOST_CHECK_THROW(parser->CreateNetworkFromString(implicitInput.c_str(), {{"data",{ 2, 2, 3, 2 }}}, {"data"}), + armnn::ParseException); + } + + // Check exception when no shape available + { + BOOST_CHECK_THROW(parser->CreateNetworkFromString(implicitInputNoShape.c_str(), {}, { "data" }), + armnn::ParseException); + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnCaffeParser/test/TestMul.cpp b/src/armnnCaffeParser/test/TestMul.cpp new file mode 100644 index 0000000000..b53318e81e --- /dev/null +++ b/src/armnnCaffeParser/test/TestMul.cpp @@ -0,0 +1,73 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(CaffeParser) + +struct MulFixture : public ParserPrototxtFixture<armnnCaffeParser::ICaffeParser> +{ + MulFixture() + { + m_Prototext = "name: \"MinimalMul\"\n" + "layer {\n" + " name: \"data\"\n" + " type: \"Input\"\n" + " top: \"data\"\n" + " input_param { shape: { dim: 1 dim: 1 dim: 4 dim: 4 } }\n" + "}\n" + "layer {\n" + " bottom: \"data\"\n" + " top: \"pool1\"\n" + " name: \"pool1\"\n" + " type: \"Pooling\"\n" + " pooling_param {\n" + " kernel_size: 2\n" + " stride: 2\n" + " pool: MAX\n" + " }\n" + "}\n" + "layer {\n" + " bottom: \"data\"\n" + " top: \"pool2\"\n" + " name: \"pool2\"\n" + " type: \"Pooling\"\n" + " pooling_param {\n" + " kernel_size: 2\n" + " stride: 2\n" + " pool: MAX\n" + " }\n" + "}\n" + "layer {\n" + " bottom: \"pool1\"\n" + " bottom: \"pool2\"\n" + " top: \"mul\"\n" + " name: \"mul\"\n" + " type: \"Eltwise\"\n" + " eltwise_param {\n" + " operation: 0\n" + " }\n" + "}\n"; + SetupSingleInputSingleOutput("data", "mul"); + } +}; + +BOOST_FIXTURE_TEST_CASE(ParseMul, MulFixture) +{ + RunTest<4>( + { + 0, 1, 0, 0, + 0, 0, 0, 0, + 0, 0, 1, 0, + 1, 0, 1, 1 + }, + { + 1, 0, + 1, 1 + }); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnCaffeParser/test/TestMultiInputsOutputs.cpp b/src/armnnCaffeParser/test/TestMultiInputsOutputs.cpp new file mode 100644 index 0000000000..cd87246bee --- /dev/null +++ b/src/armnnCaffeParser/test/TestMultiInputsOutputs.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include <boost/test/unit_test.hpp> +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(CaffeParser) + +struct MultiInputsOutputsFixture : public ParserPrototxtFixture<armnnCaffeParser::ICaffeParser> +{ + MultiInputsOutputsFixture() + { + m_Prototext = R"( +name: "MultiInputsOutputs" +layer { + name: "input1" + type: "Input" + top: "input1" + input_param { shape: { dim: 1 } } +} +layer { + name: "input2" + type: "Input" + top: "input2" + input_param { shape: { dim: 1 } } +} +layer { + bottom: "input1" + bottom: "input2" + top: "add1" + name: "add1" + type: "Eltwise" +} +layer { + bottom: "input2" + bottom: "input1" + top: "add2" + name: "add2" + type: "Eltwise" +} + )"; + Setup({ }, { "add1", "add2" }); + } +}; + +BOOST_FIXTURE_TEST_CASE(MultiInputsOutputs, MultiInputsOutputsFixture) +{ + RunTest<1>({ { "input1",{ 12.0f } },{ "input2",{ 13.0f } } }, + { { "add1",{ 25.0f } },{ "add2",{ 25.0f } } }); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnCaffeParser/test/TestPooling2d.cpp b/src/armnnCaffeParser/test/TestPooling2d.cpp new file mode 100644 index 0000000000..25cd124648 --- /dev/null +++ b/src/armnnCaffeParser/test/TestPooling2d.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <boost/test/unit_test.hpp> +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(CaffeParser) + +struct GlobalPoolingFixture : public ParserPrototxtFixture<armnnCaffeParser::ICaffeParser> +{ + GlobalPoolingFixture() + { + m_Prototext = "name: \"GlobalPooling\"\n" + "layer {\n" + " name: \"data\"\n" + " type: \"Input\"\n" + " top: \"data\"\n" + " input_param { shape: { dim: 1 dim: 3 dim: 2 dim: 2 } }\n" + "}\n" + "layer {\n" + " bottom: \"data\"\n" + " top: \"pool1\"\n" + " name: \"pool1\"\n" + " type: \"Pooling\"\n" + " pooling_param {\n" + " pool: AVE\n" + " global_pooling: true\n" + " }\n" + "}\n"; + SetupSingleInputSingleOutput("data", "pool1"); + } +}; + +BOOST_FIXTURE_TEST_CASE(GlobalPooling, GlobalPoolingFixture) +{ + RunTest<4>( + { + 1,3, + 5,7, + + 2,2, + 2,2, + + 4,4, + 6,6 + }, + { + 4, 2, 5 + }); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnCaffeParser/test/TestSplit.cpp b/src/armnnCaffeParser/test/TestSplit.cpp new file mode 100644 index 0000000000..c2f29fb4f3 --- /dev/null +++ b/src/armnnCaffeParser/test/TestSplit.cpp @@ -0,0 +1,47 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include <boost/test/unit_test.hpp> +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "ParserPrototxtFixture.hpp" + +BOOST_AUTO_TEST_SUITE(CaffeParser) + +struct SplitFixture : public ParserPrototxtFixture<armnnCaffeParser::ICaffeParser> +{ + SplitFixture() + { + m_Prototext = R"( +name: "Split" +layer { + name: "data" + type: "Input" + top: "data" + input_param { shape: { dim: 1 dim: 1 dim: 1 dim: 1 } } +} +layer { + name: "split" + type: "Split" + bottom: "data" + top: "split_top0" + top: "split_top1" +} +layer { + bottom: "split_top0" + bottom: "split_top1" + top: "add" + name: "add" + type: "Eltwise" +} + )"; + SetupSingleInputSingleOutput("data", "add"); + } +}; + +BOOST_FIXTURE_TEST_CASE(Split, SplitFixture) +{ + RunTest<1>({ 1.0f }, { 2.0f }); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/armnnUtils/GraphTopologicalSort.hpp b/src/armnnUtils/GraphTopologicalSort.hpp new file mode 100644 index 0000000000..f455289567 --- /dev/null +++ b/src/armnnUtils/GraphTopologicalSort.hpp @@ -0,0 +1,90 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <boost/assert.hpp> + +#include <functional> +#include <map> +#include <vector> + +namespace armnnUtils +{ + +namespace +{ + +enum class NodeState +{ + Visiting, + Visited, +}; + +template<typename TNodeId> +bool Visit( + TNodeId current, + std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges, + std::vector<TNodeId>& outSorted, + std::map<TNodeId, NodeState>& nodeStates) +{ + auto currentStateIt = nodeStates.find(current); + if (currentStateIt != nodeStates.end()) + { + if (currentStateIt->second == NodeState::Visited) + { + return true; + } + if (currentStateIt->second == NodeState::Visiting) + { + return false; + } + else + { + BOOST_ASSERT(false); + } + } + + nodeStates[current] = NodeState::Visiting; + + for (TNodeId inputNode : getIncomingEdges(current)) + { + Visit(inputNode, getIncomingEdges, outSorted, nodeStates); + } + + nodeStates[current] = NodeState::Visited; + + outSorted.push_back(current); + return true; +} + +} + +// Sorts an directed acyclic graph (DAG) into a flat list such that all inputs to a node are before the node itself. +// Returns true if successful or false if there is an error in the graph structure (e.g. it contains a cycle). +// The graph is defined entirely by the "getIncomingEdges" function which the user provides. For a given node, +// it must return the list of nodes which are required to come before it. +// "targetNodes" is the list of nodes where the search begins - i.e. the nodes that you want to evaluate. +// The implementation is based on https://en.wikipedia.org/wiki/Topological_sorting#Depth-first_search +template<typename TNodeId, typename TTargetNodes> +bool GraphTopologicalSort( + const TTargetNodes& targetNodes, + std::function<std::vector<TNodeId>(TNodeId)> getIncomingEdges, + std::vector<TNodeId>& outSorted) +{ + outSorted.clear(); + std::map<TNodeId, NodeState> nodeStates; + + for (TNodeId targetNode : targetNodes) + { + if (!Visit(targetNode, getIncomingEdges, outSorted, nodeStates)) + { + return false; + } + } + + return true; +} + +}
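Since the sort above is driven entirely by the getIncomingEdges callback, a small self-contained example is enough to show the intended call pattern; the node ids and edges below are invented purely for illustration.

// Illustrative only: sort a tiny hand-built dependency graph with armnnUtils::GraphTopologicalSort.
#include "GraphTopologicalSort.hpp"

#include <iostream>
#include <map>
#include <vector>

int main()
{
    // "a -> b" below means a is an input of b, so a must appear before b in the result.
    // 0 -> 2, 1 -> 2, 2 -> 3
    std::map<int, std::vector<int>> incomingEdges =
    {
        { 0, {} },
        { 1, {} },
        { 2, { 0, 1 } },
        { 3, { 2 } },
    };

    std::vector<int> sorted;
    const bool ok = armnnUtils::GraphTopologicalSort<int>(
        std::vector<int>{ 3 },                             // the node(s) we want to evaluate
        [&](int node) { return incomingEdges.at(node); },  // incoming-edge callback
        sorted);

    // Prints e.g. "sorted: 0 1 2 3" (0 and 1 may be swapped); "cycle" indicates a graph error.
    std::cout << (ok ? "sorted:" : "cycle:");
    for (int n : sorted)
    {
        std::cout << " " << n;
    }
    std::cout << std::endl;
    return 0;
}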
\ No newline at end of file diff --git a/src/armnnUtils/Logging.cpp b/src/armnnUtils/Logging.cpp new file mode 100644 index 0000000000..95978d437e --- /dev/null +++ b/src/armnnUtils/Logging.cpp @@ -0,0 +1,99 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "Logging.hpp" + +#include <string> +#include <iostream> + +#if defined(_MSC_VER) +#include <Windows.h> +#endif + +#if defined(__ANDROID__) +#include <android/log.h> +#endif + +#include <boost/make_shared.hpp> +#include <boost/log/core.hpp> +#include <boost/log/sinks.hpp> +#include <boost/log/sinks/debug_output_backend.hpp> +#include <boost/log/sinks/basic_sink_backend.hpp> +#include <boost/log/sinks/text_ostream_backend.hpp> +#include <boost/log/utility/setup/console.hpp> + +namespace armnnUtils +{ + +struct DebugOutputSink : boost::log::sinks::basic_formatted_sink_backend<char, boost::log::sinks::concurrent_feeding> +{ + void consume(boost::log::record_view const& rec, std::string const& formatted_message) + { +#if defined(_MSC_VER) + OutputDebugString(formatted_message.c_str()); + OutputDebugString("\n"); +#endif +#if defined(__ANDROID__) + __android_log_write(ANDROID_LOG_DEBUG, "armnn", formatted_message.c_str()); +#endif + } +}; + +void ConfigureLogging(boost::log::core* core, bool printToStandardOutput, bool printToDebugOutput, + armnn::LogSeverity severity) +{ + // Even if we remove all the sinks, Boost will fallback to the 'default sink' and still print stuff to + // stdout, so we have to explicitly disable logging in this case. + core->set_logging_enabled(printToStandardOutput || printToDebugOutput); + + // Setup severity filter + boost::log::trivial::severity_level boostSeverity; + switch (severity) + { + case armnn::LogSeverity::Trace: + boostSeverity = boost::log::trivial::trace; + break; + case armnn::LogSeverity::Debug: + boostSeverity = boost::log::trivial::debug; + break; + case armnn::LogSeverity::Info: + boostSeverity = boost::log::trivial::info; + break; + case armnn::LogSeverity::Warning: + boostSeverity = boost::log::trivial::warning; + break; + case armnn::LogSeverity::Error: + boostSeverity = boost::log::trivial::error; + break; + case armnn::LogSeverity::Fatal: + boostSeverity = boost::log::trivial::fatal; + break; + default: + BOOST_ASSERT_MSG(false, "Invalid severity"); + } + core->set_filter(boost::log::trivial::severity >= boostSeverity); + + core->remove_all_sinks(); + if (printToStandardOutput) + { + typedef boost::log::sinks::basic_text_ostream_backend<char> backend_t; + boost::shared_ptr<backend_t> backend = boost::make_shared<backend_t>(); + + boost::shared_ptr<std::basic_ostream<char>> stream(&std::cout, boost::null_deleter()); + backend->add_stream(stream); + + typedef boost::log::sinks::synchronous_sink<backend_t> sink_t; + boost::shared_ptr<sink_t> standardOutputSink = boost::make_shared<sink_t>(backend); + + core->add_sink(standardOutputSink); + } + if (printToDebugOutput) + { + typedef boost::log::sinks::synchronous_sink<DebugOutputSink> sink_t; + boost::shared_ptr<sink_t> debugOutputSink(new sink_t()); + core->add_sink(debugOutputSink); + } +} + +} diff --git a/src/armnnUtils/Logging.hpp b/src/armnnUtils/Logging.hpp new file mode 100644 index 0000000000..5669fcaebf --- /dev/null +++ b/src/armnnUtils/Logging.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + + +#include "armnn/Utils.hpp" + +#include <boost/log/trivial.hpp> + +namespace armnnUtils +{ + +// Configures logging for the given Boost Log Core object. +void ConfigureLogging(boost::log::core* core, + bool printToStandardOutput, + bool printToDebugOutput, + armnn::LogSeverity severity); + +}
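A plausible call site for the helper declared above is sketched below; the chosen sinks and severity level are assumptions for illustration, not a prescribed configuration.

// Illustrative only: configure Boost.Log once at startup via armnnUtils::ConfigureLogging.
#include "Logging.hpp"

#include <boost/log/core.hpp>
#include <boost/log/trivial.hpp>

int main()
{
    // Send Info and above to stdout; leave the debugger/logcat sink disabled.
    armnnUtils::ConfigureLogging(boost::log::core::get().get(),
                                 true,                       // printToStandardOutput
                                 false,                      // printToDebugOutput
                                 armnn::LogSeverity::Info);

    BOOST_LOG_TRIVIAL(info) << "logging configured";
    return 0;
}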
\ No newline at end of file diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp new file mode 100644 index 0000000000..0e34477a96 --- /dev/null +++ b/src/armnnUtils/ParserPrototxtFixture.hpp @@ -0,0 +1,134 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#pragma once + +#include "armnn/IRuntime.hpp" +#include "test/TensorHelpers.hpp" +#include <string> + +template<typename TParser> +struct ParserPrototxtFixture +{ + ParserPrototxtFixture() + : m_Parser(TParser::Create()) + , m_Runtime(armnn::IRuntime::Create(armnn::Compute::CpuRef)) + , m_NetworkIdentifier(-1) + {} + + /// Parses and loads the network defined by the m_Prototext string. + /// @{ + void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName); + void SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape, + const std::string& inputName, + const std::string& outputName); + void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs); + /// @} + + /// Executes the network with the given input tensor and checks the result against the given output tensor. + /// This overload assumes the network has a single input and a single output. + template <std::size_t NumOutputDimensions> + void RunTest(const std::vector<float>& inputData, const std::vector<float>& expectedOutputData); + + /// Executes the network with the given input tensors and checks the results against the given output tensors. + /// This overload supports multiple inputs and multiple outputs, identified by name. + template <std::size_t NumOutputDimensions> + void RunTest(const std::map<std::string, std::vector<float>>& inputData, + const std::map<std::string, std::vector<float>>& expectedOutputData); + + std::string m_Prototext; + std::unique_ptr<TParser, void(*)(TParser* parser)> m_Parser; + armnn::IRuntimePtr m_Runtime; + armnn::NetworkId m_NetworkIdentifier; + + /// If the single-input-single-output overload of Setup() is called, these will store the input and output name + /// so they don't need to be passed to the single-input-single-output overload of RunTest(). + /// @{ + std::string m_SingleInputName; + std::string m_SingleOutputName; + /// @} +}; + +template<typename TParser> +void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const std::string& inputName, + const std::string& outputName) +{ + // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest(). + m_SingleInputName = inputName; + m_SingleOutputName = outputName; + Setup({ }, { outputName }); +} + +template<typename TParser> +void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape, + const std::string& inputName, + const std::string& outputName) +{ + // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest(). 
+ m_SingleInputName = inputName; + m_SingleOutputName = outputName; + Setup({ { inputName, inputTensorShape } }, { outputName }); +} + +template<typename TParser> +void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes, + const std::vector<std::string>& requestedOutputs) +{ + armnn::INetworkPtr network = + m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs); + + auto optimized = Optimize(*network, m_Runtime->GetDeviceSpec()); + armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized)); + if (ret != armnn::Status::Success) + { + throw armnn::Exception("LoadNetwork failed"); + } +} + +template<typename TParser> +template <std::size_t NumOutputDimensions> +void ParserPrototxtFixture<TParser>::RunTest(const std::vector<float>& inputData, + const std::vector<float>& expectedOutputData) +{ + RunTest<NumOutputDimensions>({ { m_SingleInputName, inputData } }, { { m_SingleOutputName, expectedOutputData } }); +} + +template<typename TParser> +template <std::size_t NumOutputDimensions> +void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::vector<float>>& inputData, + const std::map<std::string, std::vector<float>>& expectedOutputData) +{ + using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>; + + // Setup the armnn input tensors from the given vectors. + armnn::InputTensors inputTensors; + for (auto&& it : inputData) + { + BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(it.first); + inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) }); + } + + // Allocate storage for the output tensors to be written to and setup the armnn output tensors. + std::map<std::string, boost::multi_array<float, NumOutputDimensions>> outputStorage; + armnn::OutputTensors outputTensors; + for (auto&& it : expectedOutputData) + { + BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first); + outputStorage.emplace(it.first, MakeTensor<float, NumOutputDimensions>(bindingInfo.second)); + outputTensors.push_back( + { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) }); + } + + m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors); + + // Compare each output tensor to the expected values + for (auto&& it : expectedOutputData) + { + BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first); + auto outputExpected = MakeTensor<float, NumOutputDimensions>(bindingInfo.second, it.second); + BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first])); + } +} diff --git a/src/armnnUtils/Permute.cpp b/src/armnnUtils/Permute.cpp new file mode 100644 index 0000000000..58e58583fc --- /dev/null +++ b/src/armnnUtils/Permute.cpp @@ -0,0 +1,118 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// + +#include "Permute.hpp" + +#include <armnn/Tensor.hpp> + +#include <cassert> + +namespace +{ + +class PermuteLoop +{ +public: + using size_type = unsigned int; + + PermuteLoop(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings) + : m_DstShape(dstShape) + { + assert(dstShape.GetNumDimensions() == mappings.GetSize()); + + const size_type numDims = dstShape.GetNumDimensions(); + + size_type srcStride = 1U; + size_type dstStride = 1U; + + for (size_type i = numDims - 1U, k = 0U; k < numDims; ++k, --i) + { + m_SrcStrides[mappings[i]] = srcStride; + m_DstStrides[i] = dstStride; + + srcStride *= dstShape[mappings[i]]; + dstStride *= dstShape[i]; + } + } + + template <typename T> + void Unroll(const T* srcData, T* dstData) + { + const T* const srcEnd = srcData + m_DstShape.GetNumElements(); + T* const dstEnd = dstData + m_DstShape.GetNumElements(); + Unroll(0, srcData, dstData, srcEnd, dstEnd); + } + +private: + template <typename T> + void Unroll(size_type dimension, const T* srcData, T* dstData, const T* srcEnd, T* dstEnd) + { + assert(srcData < srcEnd); + assert(dstData < dstEnd); + + if (dimension >= m_DstShape.GetNumDimensions()) + { + *dstData = *srcData; + } + else + { + for (size_type i = 0; i < m_DstShape[dimension]; i++) + { + Unroll(dimension + 1, srcData, dstData, srcEnd, dstEnd); + + srcData += m_SrcStrides[dimension]; + dstData += m_DstStrides[dimension]; + } + } + } + + armnn::TensorShape m_DstShape; + std::array<size_type, armnn::MaxNumOfTensorDimensions> m_SrcStrides; + std::array<size_type, armnn::MaxNumOfTensorDimensions> m_DstStrides; +}; + +} // namespace + +namespace armnnUtils +{ + +armnn::TensorShape Permuted(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings) +{ + assert(srcShape.GetNumDimensions() == mappings.GetSize()); + + const unsigned int numDims = mappings.GetSize(); + unsigned int outDims[armnn::MaxNumOfTensorDimensions]; + + for (unsigned int i = 0U; i < numDims; ++i) + { + outDims[mappings[i]] = srcShape[i]; + } + + armnn::TensorShape permutedShape(numDims, outDims); + return permutedShape; +} + +armnn::TensorInfo Permuted(const armnn::TensorInfo& info, const armnn::PermutationVector& mappings) +{ + armnn::TensorInfo outInfo(info); + outInfo.SetShape(Permuted(info.GetShape(), mappings)); + return outInfo; +} + +template <typename T> +void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings, const T* src, T* dst) +{ + PermuteLoop(dstShape, mappings).Unroll(src, dst); +} + +// Instantiate for types +template void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings, + const float* src, float* dst); +template void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings, + const uint8_t* src, uint8_t* dst); +template void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings, + const int32_t* src, int32_t* dst); + +} // namespace armnnUtils diff --git a/src/armnnUtils/Permute.hpp b/src/armnnUtils/Permute.hpp new file mode 100644 index 0000000000..44f7a281bb --- /dev/null +++ b/src/armnnUtils/Permute.hpp @@ -0,0 +1,20 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include <armnn/TensorFwd.hpp> +#include <armnn/Types.hpp> + +namespace armnnUtils +{ + +armnn::TensorShape Permuted(const armnn::TensorShape& srcShape, const armnn::PermutationVector& mappings); + +armnn::TensorInfo Permuted(const armnn::TensorInfo& info, const armnn::PermutationVector& mappings); + +template <typename T> +void Permute(const armnn::TensorShape& dstShape, const armnn::PermutationVector& mappings, const T* src, T* dst); + +} // namespace armnnUtils
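To make the mapping convention concrete: mappings[i] names the destination dimension that source dimension i moves to. The example below, with made-up sizes and data, permutes an NHWC tensor to NCHW using the helpers above.

// Illustrative only: permute a small NHWC tensor to NCHW with armnnUtils::Permuted/Permute.
#include "Permute.hpp"

#include <armnn/Tensor.hpp>
#include <vector>

int main()
{
    const armnn::TensorShape nhwcShape({ 1, 2, 2, 3 });          // N H W C
    const armnn::PermutationVector nhwcToNchw({ 0, 2, 3, 1 });   // N->0, H->2, W->3, C->1

    // Compute the permuted shape: { 1, 3, 2, 2 } (N C H W).
    const armnn::TensorShape nchwShape = armnnUtils::Permuted(nhwcShape, nhwcToNchw);

    std::vector<float> src(nhwcShape.GetNumElements());
    std::vector<float> dst(nchwShape.GetNumElements());
    for (unsigned int i = 0; i < src.size(); ++i)
    {
        src[i] = static_cast<float>(i);
    }

    // Rearrange the element data to match the permuted (NCHW) shape.
    armnnUtils::Permute(nchwShape, nhwcToNchw, src.data(), dst.data());
    return 0;
}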
\ No newline at end of file diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt new file mode 100644 index 0000000000..15b1b2415d --- /dev/null +++ b/tests/CMakeLists.txt @@ -0,0 +1,108 @@ +# UnitTests +include(CheckIncludeFiles) + +include_directories(SYSTEM "${THIRD_PARTY_INCLUDE_DIRS}") + +# Setup the inference test framework +set(inference_test_sources + ClassifierTestCaseData.hpp + InferenceModel.hpp + InferenceTest.hpp + InferenceTest.inl + InferenceTest.cpp + InferenceTestImage.hpp + InferenceTestImage.cpp) +add_library_ex(inferenceTest STATIC ${inference_test_sources}) +target_include_directories(inferenceTest PRIVATE ../src/armnnUtils) + +if(BUILD_CAFFE_PARSER) + macro(CaffeParserTest testName sources) + add_executable_ex(${testName} ${sources}) + target_include_directories(${testName} PRIVATE ../src/armnnUtils) + set_target_properties(${testName} PROPERTIES COMPILE_FLAGS "${CAFFE_PARSER_TEST_ADDITIONAL_COMPILE_FLAGS}") + + target_link_libraries(${testName} inferenceTest) + target_link_libraries(${testName} armnnCaffeParser) + target_link_libraries(${testName} armnn) + target_link_libraries(${testName} ${CMAKE_THREAD_LIBS_INIT}) + if(OPENCL_LIBRARIES) + target_link_libraries(${testName} ${OPENCL_LIBRARIES}) + endif() + target_link_libraries(${testName} + ${Boost_SYSTEM_LIBRARY} + ${Boost_FILESYSTEM_LIBRARY} + ${Boost_PROGRAM_OPTIONS_LIBRARY}) + addDllCopyCommands(${testName}) + endmacro() + + set(CaffeCifar10AcrossChannels-Armnn_sources + CaffeCifar10AcrossChannels-Armnn/CaffeCifar10AcrossChannels-Armnn.cpp + Cifar10Database.hpp + Cifar10Database.cpp) + CaffeParserTest(CaffeCifar10AcrossChannels-Armnn "${CaffeCifar10AcrossChannels-Armnn_sources}") + + set(CaffeMnist-Armnn_sources + CaffeMnist-Armnn/CaffeMnist-Armnn.cpp + MnistDatabase.hpp + MnistDatabase.cpp) + CaffeParserTest(CaffeMnist-Armnn "${CaffeMnist-Armnn_sources}") + + set(CaffeAlexNet-Armnn_sources + CaffeAlexNet-Armnn/CaffeAlexNet-Armnn.cpp + ImageNetDatabase.hpp + ImageNetDatabase.cpp) + CaffeParserTest(CaffeAlexNet-Armnn "${CaffeAlexNet-Armnn_sources}") + + set(MultipleNetworksCifar10_SRC + MultipleNetworksCifar10/MultipleNetworksCifar10.cpp + Cifar10Database.hpp + Cifar10Database.cpp) + CaffeParserTest(MultipleNetworksCifar10 "${MultipleNetworksCifar10_SRC}") + + set(CaffeResNet-Armnn_sources + CaffeResNet-Armnn/CaffeResNet-Armnn.cpp + ImageNetDatabase.hpp + ImageNetDatabase.cpp) + CaffeParserTest(CaffeResNet-Armnn "${CaffeResNet-Armnn_sources}") + + set(CaffeVGG-Armnn_sources + CaffeVGG-Armnn/CaffeVGG-Armnn.cpp + ImageNetDatabase.hpp + ImageNetDatabase.cpp) + CaffeParserTest(CaffeVGG-Armnn "${CaffeVGG-Armnn_sources}") + + set(CaffeInception_BN-Armnn_sources + CaffeInception_BN-Armnn/CaffeInception_BN-Armnn.cpp + ImageNetDatabase.hpp + ImageNetDatabase.cpp) + CaffeParserTest(CaffeInception_BN-Armnn "${CaffeInception_BN-Armnn_sources}") + + set(CaffeYolo-Armnn_sources + CaffeYolo-Armnn/CaffeYolo-Armnn.cpp + YoloDatabase.hpp + YoloDatabase.cpp + YoloInferenceTest.hpp) + CaffeParserTest(CaffeYolo-Armnn "${CaffeYolo-Armnn_sources}") +endif() + +if (BUILD_CAFFE_PARSER) + set(ExecuteNetwork_sources + ExecuteNetwork/ExecuteNetwork.cpp) + + add_executable_ex(ExecuteNetwork ${ExecuteNetwork_sources}) + target_include_directories(ExecuteNetwork PRIVATE ../src/armnnUtils) + + if (BUILD_CAFFE_PARSER) + target_link_libraries(ExecuteNetwork armnnCaffeParser) + endif() + target_link_libraries(ExecuteNetwork armnn) + target_link_libraries(ExecuteNetwork ${CMAKE_THREAD_LIBS_INIT}) + if(OPENCL_LIBRARIES) + 
target_link_libraries(ExecuteNetwork ${OPENCL_LIBRARIES}) + endif() + target_link_libraries(ExecuteNetwork + ${Boost_SYSTEM_LIBRARY} + ${Boost_FILESYSTEM_LIBRARY} + ${Boost_PROGRAM_OPTIONS_LIBRARY}) + addDllCopyCommands(ExecuteNetwork) +endif() diff --git a/tests/CaffeAlexNet-Armnn/CaffeAlexNet-Armnn.cpp b/tests/CaffeAlexNet-Armnn/CaffeAlexNet-Armnn.cpp new file mode 100644 index 0000000000..c50d8ea05f --- /dev/null +++ b/tests/CaffeAlexNet-Armnn/CaffeAlexNet-Armnn.cpp @@ -0,0 +1,14 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "../InferenceTest.hpp" +#include "../ImageNetDatabase.hpp" +#include "armnnCaffeParser/ICaffeParser.hpp" + +int main(int argc, char* argv[]) +{ + return armnn::test::ClassifierInferenceTestMain<ImageNetDatabase, armnnCaffeParser::ICaffeParser>( + argc, argv, "bvlc_alexnet_1.caffemodel", true, "data", "prob", { 0 }, + [](const char* dataDir) { return ImageNetDatabase(dataDir); }); +} diff --git a/tests/CaffeAlexNet-Armnn/Validation.txt b/tests/CaffeAlexNet-Armnn/Validation.txt new file mode 100644 index 0000000000..cb95f050e2 --- /dev/null +++ b/tests/CaffeAlexNet-Armnn/Validation.txt @@ -0,0 +1,1000 @@ +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 
+2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 diff --git a/tests/CaffeCifar10AcrossChannels-Armnn/CaffeCifar10AcrossChannels-Armnn.cpp b/tests/CaffeCifar10AcrossChannels-Armnn/CaffeCifar10AcrossChannels-Armnn.cpp new file mode 100644 index 0000000000..9994bb5431 --- /dev/null +++ b/tests/CaffeCifar10AcrossChannels-Armnn/CaffeCifar10AcrossChannels-Armnn.cpp @@ -0,0 +1,15 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "../InferenceTest.hpp" +#include "../Cifar10Database.hpp" +#include "armnnCaffeParser/ICaffeParser.hpp" + +int main(int argc, char* argv[]) +{ + return armnn::test::ClassifierInferenceTestMain<Cifar10Database, armnnCaffeParser::ICaffeParser>( + argc, argv, "cifar10_full_iter_60000.caffemodel", true, "data", "prob", + { 0, 1, 2, 4, 7 }, + [](const char* dataDir) { return Cifar10Database(dataDir); }); +} diff --git a/tests/CaffeCifar10AcrossChannels-Armnn/Validation.txt b/tests/CaffeCifar10AcrossChannels-Armnn/Validation.txt new file mode 100644 index 0000000000..a7b59465eb --- /dev/null +++ b/tests/CaffeCifar10AcrossChannels-Armnn/Validation.txt @@ -0,0 +1,1000 @@ +3 +8 +8 +8 +6 +8 +5 +6 +3 +8 +0 +9 +5 +7 +9 +8 +5 +7 +8 +6 +7 +0 +8 +9 +4 +3 +3 +0 +9 +6 +6 +5 +8 +3 +9 +3 +7 +9 +9 +5 +0 +6 +7 +3 +0 +9 +3 +8 +7 +2 +9 +8 +5 +5 +8 +8 +7 +5 +5 +3 +7 +5 +2 +3 +6 +7 +8 +0 +3 +7 +0 +3 +8 +8 +0 +2 +0 +8 +5 +8 +8 +0 +1 +7 +3 +0 +3 +3 +8 +9 +0 +2 +8 +6 +7 +3 +6 +0 +0 +7 +8 +5 +6 +3 +1 +1 +3 +6 +8 +7 +5 +0 +2 +3 +0 +3 +0 +3 +7 +5 +8 +0 +1 +2 +8 +8 +8 +3 +6 +0 +4 +1 +8 +9 +1 +0 +9 +4 +2 +8 +3 +5 +6 +5 +8 +0 +6 +5 +5 +5 +8 +9 +5 +0 +0 +5 +0 +9 +5 +4 +0 +0 +0 +6 +0 +0 +8 +8 +5 +8 +9 +0 +8 +8 +9 +9 +3 +7 +5 +0 +0 +5 +2 +8 +0 +8 +5 +3 +3 +8 +5 +8 +0 +1 +7 +3 +8 +8 +7 +8 +5 +0 +8 +0 +1 +3 +8 +5 +7 +8 +7 +0 +5 +8 +8 +0 +7 +9 +8 +2 +7 +5 +8 +5 +5 +9 +8 +0 +3 +6 +5 +1 +7 +8 +8 +0 +4 +0 +5 +3 +1 +1 +8 +3 +0 +8 +1 +8 +2 +0 +5 +5 +9 +9 +2 +8 +3 +0 +8 +9 +8 +8 +3 +3 +0 +8 +8 +4 +7 +0 +0 +3 +6 +3 +8 +0 +0 +3 +2 +5 +9 +0 +6 +1 +0 +9 +8 +8 +7 +9 +8 +2 +6 +9 +3 +0 +6 +0 +0 +6 +6 +3 +3 +8 +8 +8 +8 +3 +1 +0 +8 +6 +0 +0 +8 +0 +7 +7 +5 +5 +3 +3 +2 +0 +5 +0 +7 +7 +3 +6 +1 +9 +3 +6 +6 +9 +3 +8 +0 +7 +0 +6 +2 +5 +8 +5 +7 +6 +8 +9 +9 +1 +8 +2 +3 +7 +5 +2 +8 +0 +9 +5 +8 +8 +9 +4 +0 +5 +8 +0 +0 +7 +9 +3 +2 +7 +3 +7 +8 +6 +6 +9 +0 +8 +5 +0 +7 +3 +5 +5 +1 +2 +6 +2 +3 +6 +2 +3 +0 +8 +9 +8 +7 +8 +8 +4 +0 +8 +8 +3 +5 +8 +3 +8 +1 +9 +0 +5 +5 +7 +4 +7 +8 +0 +0 +9 +3 +7 +0 +6 +3 +3 +8 +7 +3 +7 +8 +5 +3 +8 +1 +3 +9 +8 +8 +7 +3 +0 +0 +0 +2 +9 +7 +0 +8 +3 +4 +5 +3 +8 +5 +6 +8 +7 +3 +8 +4 +3 +7 +8 +5 +7 +8 +8 +3 +7 +4 +0 +5 +4 +3 +6 +0 +8 +5 +8 +9 +9 +8 +0 +0 +0 +0 +1 +8 +8 +0 +5 +2 +0 +4 +0 +5 +2 +9 +4 +7 +9 +0 +4 +5 +6 +8 +9 +5 +5 +8 +9 +3 +8 +5 +7 +0 +7 +0 +5 +0 +0 +0 +6 +8 +8 +9 +5 +6 +3 +6 +3 +9 +8 +1 +7 +0 +7 +5 +9 +0 +6 +5 +5 +3 +3 +8 +3 +9 +8 +6 +4 +3 +2 +0 +7 +6 +0 +2 +3 +9 +5 +8 +0 +6 +7 +8 +3 +6 
+8 +8 +8 +7 +5 +4 +0 +8 +4 +0 +8 +3 +5 +8 +9 +6 +9 +2 +3 +0 +0 +7 +8 +8 +3 +8 +5 +0 +2 +1 +6 +3 +4 +3 +9 +6 +9 +8 +8 +5 +8 +6 +3 +2 +1 +7 +7 +1 +2 +7 +9 +9 +4 +4 +0 +8 +3 +2 +8 +7 +0 +8 +3 +0 +3 +3 +8 +0 +7 +9 +1 +8 +0 +4 +5 +3 +9 +3 +0 +8 +0 +1 +5 +4 +1 +8 +0 +7 +6 +3 +0 +9 +0 +8 +2 +6 +3 +2 +3 +0 +0 +3 +8 +0 +3 +9 +6 +8 +0 +9 +2 +8 +2 +3 +0 +3 +2 +2 +7 +8 +3 +8 +0 +7 +5 +7 +0 +4 +8 +7 +4 +8 +3 +8 +8 +6 +0 +8 +7 +4 +3 +3 +8 +4 +8 +7 +8 +8 +9 +8 +8 +1 +3 +3 +5 +5 +0 +7 +9 +8 +0 +8 +4 +1 +3 +5 +7 +8 +7 +8 +7 +4 +6 +2 +5 +8 +0 +8 +1 +2 +0 +6 +8 +2 +1 +3 +5 +6 +0 +1 +2 +0 +8 +3 +0 +5 +0 +6 +8 +0 +2 +7 +6 +0 +6 +9 +1 +7 +8 +7 +0 +3 +9 +7 +8 +0 +0 +3 +3 +7 +5 +4 +8 +8 +8 +7 +1 +2 +7 +4 +4 +8 +4 +7 +7 +3 +2 +7 +2 +0 +8 +8 +5 +8 +0 +8 +2 +0 +8 +7 +5 +0 +8 +5 +0 +0 +8 +2 +2 +2 +8 +9 +2 +7 +2 +7 +0 +7 +2 +1 +0 +0 +0 +8 +4 +7 +9 +8 +0 +0 +7 +7 +0 +7 +8 +4 +4 +3 +5 +0 +1 +3 +7 +0 +1 +8 +1 +4 +2 +3 +8 +4 +5 +0 +7 +8 +8 +3 +0 +8 +8 +8 +8 +8 +4 +3 +6 +7 +3 +1 +8 +3 +7 +7 +5 +5 +6 +6 +5 +8 +8 +1 +6 +8 +8 +3 +3 +3 +2 +0 +1 +8 +8 +8 +0 +0 +9 +9 +3 +3 +5 +8 +3 +0 +0 +4 +2 +3 +3 +7 +3 +0 +5 +8 +8 +9 +8 +5 +4 +8 +3 +0 +8 +7 +8 +3 +9 +2 +8 +4 +7 +8 +3 +7 +8 +8 +8 +8 +3 +6 +3 +3 +8 +1 +9 +9 +4 +6 +8 +0 +0 +0 +8 +8 +9 +2 +8 +8 +8 +7 +8 +3 +1 +7 +0 +1 +5 +8 +3 +3 +3 +8 +9 +3 +8 diff --git a/tests/CaffeInception_BN-Armnn/CaffeInception_BN-Armnn.cpp b/tests/CaffeInception_BN-Armnn/CaffeInception_BN-Armnn.cpp new file mode 100644 index 0000000000..557a3b00f4 --- /dev/null +++ b/tests/CaffeInception_BN-Armnn/CaffeInception_BN-Armnn.cpp @@ -0,0 +1,19 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "../InferenceTest.hpp" +#include "../ImageNetDatabase.hpp" +#include "armnnCaffeParser/ICaffeParser.hpp" + +int main(int argc, char* argv[]) +{ + std::vector<ImageSet> imageSet = + { + {"shark.jpg", 3694} + }; + return armnn::test::ClassifierInferenceTestMain<ImageNetDatabase, armnnCaffeParser::ICaffeParser>( + argc, argv, "Inception-BN-batchsize1.caffemodel", true, + "data", "softmax", { 0 }, + [&imageSet](const char* dataDir) { return ImageNetDatabase(dataDir, 224, 224, imageSet); }); +} diff --git a/tests/CaffeInception_BN-Armnn/Validation.txt b/tests/CaffeInception_BN-Armnn/Validation.txt new file mode 100644 index 0000000000..f6040137d8 --- /dev/null +++ b/tests/CaffeInception_BN-Armnn/Validation.txt @@ -0,0 +1,1000 @@ +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 
+3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 
+3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 +3694 diff --git a/tests/CaffeMnist-Armnn/CaffeMnist-Armnn.cpp b/tests/CaffeMnist-Armnn/CaffeMnist-Armnn.cpp new file mode 100644 index 0000000000..5b8864d73d --- /dev/null +++ b/tests/CaffeMnist-Armnn/CaffeMnist-Armnn.cpp @@ -0,0 +1,15 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "../InferenceTest.hpp" +#include "../MnistDatabase.hpp" +#include "armnnCaffeParser/ICaffeParser.hpp" + +int main(int argc, char* argv[]) +{ + return armnn::test::ClassifierInferenceTestMain<MnistDatabase, armnnCaffeParser::ICaffeParser>( + argc, argv, "lenet_iter_9000.caffemodel", true, "data", "prob", + { 0, 1, 5, 8, 9 }, + [](const char* dataDir) { return MnistDatabase(dataDir); }); +} diff --git a/tests/CaffeMnist-Armnn/Validation.txt b/tests/CaffeMnist-Armnn/Validation.txt new file mode 100644 index 0000000000..63cbca6c56 --- /dev/null +++ b/tests/CaffeMnist-Armnn/Validation.txt @@ -0,0 +1,1000 @@ +7 +2 +1 +0 +4 +1 +4 +9 +5 +9 +0 +6 +9 +0 +1 +5 +9 +7 +3 +4 +9 +6 +6 +5 +4 +0 +7 +4 +0 +1 +3 +1 +3 +4 +7 +2 +7 +1 +2 +1 +1 +7 +4 +2 +3 +5 +1 +2 +4 +4 +6 +3 +5 +5 +6 +0 +4 +1 +9 +5 +7 +8 +9 +3 +7 +4 +6 +4 +3 +0 +7 +0 +2 +9 +1 +7 +3 +2 +9 +7 +7 +6 +2 +7 +8 +4 +7 +3 +6 +1 +3 +6 +9 +3 +1 +4 +1 +7 +6 +9 +6 +0 +5 +4 +9 +9 +2 +1 +9 +4 +8 +7 +3 +9 +7 +9 +4 +4 +9 +2 +5 +4 +7 +6 +7 +9 +0 +5 +8 +5 +6 +6 +5 +7 +8 +1 +0 +1 +6 +4 +6 +7 +3 +1 +7 +1 +8 +2 +0 +2 +9 +9 +5 +5 +1 +5 +6 +0 +3 +4 +4 +6 +5 +4 +6 +5 +4 +5 +1 +4 +4 +7 +2 +3 +2 +7 +1 +8 +1 +8 +1 +8 +5 +0 +8 +9 +2 +5 +0 +1 +1 +1 +0 +9 +0 +3 +1 +6 +4 +2 +3 +6 +1 +1 +1 +3 +9 +5 +2 +9 +4 +5 +9 +3 +9 +0 +3 +6 +5 +5 +7 +2 +2 +7 +1 +2 +8 +4 +1 +7 +3 +3 +8 +8 +7 +9 +2 +2 +4 +1 +5 +9 +8 +7 +2 +3 +0 +4 +4 +2 +4 +1 +9 +5 +7 +7 +2 +8 +2 +6 +8 +5 +7 +7 +9 +1 +8 +1 +8 +0 +3 +0 +1 +9 +9 +4 +1 +8 +2 +1 +2 +9 +7 +5 +9 +2 +6 +4 +1 +5 +8 +2 +9 +2 +0 +4 +0 +0 +2 +8 +4 +7 +1 +2 +4 +0 +2 +7 +4 +3 +3 +0 +0 +3 +1 +9 +6 +5 +2 +5 +9 +2 +9 +3 +0 +4 +2 +0 +7 +1 +1 +2 +1 +5 +3 +3 +9 +7 +8 +6 +3 +6 +1 +3 +8 +1 +0 +5 +1 +3 +1 +5 +5 +6 +1 +8 +5 +1 +7 +9 +4 +6 +2 +2 +5 +0 +6 +5 +6 +3 +7 +2 +0 +8 +8 +5 +4 +1 +1 +4 +0 +3 +3 +7 +6 +1 +6 +2 +1 +9 +2 +8 +6 +1 +9 +5 +2 +5 +4 +4 +2 +8 +3 +8 +2 +4 +5 +0 +3 +1 +7 +7 +5 +7 +9 +7 +1 
+9 +2 +1 +4 +2 +9 +2 +0 +4 +9 +1 +4 +8 +1 +8 +4 +5 +9 +8 +8 +3 +7 +6 +0 +0 +3 +0 +2 +0 +6 +4 +9 +5 +3 +3 +2 +3 +9 +1 +2 +6 +8 +0 +5 +6 +6 +6 +3 +8 +8 +2 +7 +5 +8 +9 +6 +1 +8 +4 +1 +2 +5 +9 +1 +9 +7 +5 +4 +0 +8 +9 +9 +1 +0 +5 +2 +3 +7 +8 +9 +4 +0 +6 +3 +9 +5 +2 +1 +3 +1 +3 +6 +5 +7 +4 +2 +2 +6 +3 +2 +6 +5 +4 +8 +9 +7 +1 +3 +0 +3 +8 +3 +1 +9 +3 +4 +4 +6 +4 +2 +1 +8 +2 +5 +4 +8 +8 +4 +0 +0 +2 +3 +2 +7 +3 +0 +8 +7 +4 +4 +7 +9 +6 +9 +0 +9 +8 +0 +4 +6 +0 +6 +3 +5 +4 +8 +3 +3 +9 +3 +3 +3 +7 +8 +0 +2 +2 +1 +7 +0 +6 +5 +4 +3 +8 +0 +9 +6 +3 +8 +0 +9 +9 +6 +8 +6 +8 +5 +7 +8 +6 +0 +2 +4 +0 +2 +2 +3 +1 +9 +7 +5 +8 +0 +8 +4 +6 +2 +6 +7 +9 +3 +2 +9 +8 +2 +2 +9 +2 +7 +3 +5 +9 +1 +8 +0 +2 +0 +5 +2 +1 +3 +7 +6 +7 +1 +2 +5 +8 +0 +3 +7 +1 +4 +0 +9 +1 +8 +6 +7 +7 +4 +3 +4 +9 +1 +9 +5 +1 +7 +3 +9 +7 +6 +9 +1 +3 +3 +8 +3 +3 +6 +7 +2 +8 +5 +8 +5 +1 +1 +4 +4 +3 +1 +0 +7 +7 +0 +7 +9 +4 +4 +8 +5 +5 +4 +0 +8 +2 +7 +0 +8 +4 +8 +0 +4 +0 +6 +1 +7 +3 +2 +6 +7 +2 +6 +9 +3 +1 +4 +6 +2 +5 +4 +2 +0 +6 +2 +1 +7 +3 +4 +1 +0 +5 +4 +3 +1 +1 +7 +4 +9 +9 +4 +8 +4 +0 +2 +4 +5 +1 +1 +6 +4 +7 +1 +9 +4 +2 +4 +1 +5 +5 +3 +8 +3 +1 +4 +5 +6 +8 +9 +4 +1 +5 +3 +8 +0 +3 +2 +5 +1 +2 +8 +3 +4 +4 +0 +8 +8 +3 +3 +1 +7 +3 +5 +9 +6 +3 +2 +6 +1 +3 +6 +0 +7 +2 +1 +7 +1 +4 +2 +4 +2 +1 +7 +9 +6 +1 +1 +2 +4 +8 +1 +7 +7 +4 +8 +0 +9 +3 +1 +3 +1 +0 +7 +7 +0 +3 +5 +5 +2 +7 +6 +6 +9 +2 +8 +3 +5 +2 +2 +5 +6 +0 +8 +2 +9 +2 +8 +8 +8 +8 +7 +4 +9 +3 +0 +6 +6 +3 +2 +1 +3 +2 +2 +9 +3 +0 +0 +5 +7 +8 +3 +4 +4 +6 +0 +2 +9 +1 +4 +7 +4 +7 +3 +9 +8 +8 +4 +7 +1 +2 +1 +2 +2 +3 +2 +3 +2 +3 +9 +1 +7 +4 +0 +3 +5 +5 +8 +6 +3 +2 +6 +7 +6 +6 +3 +2 +7 +9 +1 +1 +7 +5 +6 +4 +9 +5 +1 +3 +3 +4 +7 +8 +9 +1 +1 +6 +9 +1 +4 +4 +5 +4 +0 +6 +2 +2 +3 +1 +5 +1 +2 +0 +3 +8 +1 +2 +6 +7 +1 +6 +2 +3 +9 +0 +1 +2 +2 +0 +8 +9 diff --git a/tests/CaffeResNet-Armnn/CaffeResNet-Armnn.cpp b/tests/CaffeResNet-Armnn/CaffeResNet-Armnn.cpp new file mode 100644 index 0000000000..ed304f8b0c --- /dev/null +++ b/tests/CaffeResNet-Armnn/CaffeResNet-Armnn.cpp @@ -0,0 +1,23 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "../InferenceTest.hpp" +#include "../ImageNetDatabase.hpp" +#include "armnnCaffeParser/ICaffeParser.hpp" + +int main(int argc, char* argv[]) +{ + std::vector<ImageSet> imageSet = + { + {"ILSVRC2012_val_00000018.JPEG", 21 }, + {"shark.jpg", 2} + }; + + armnn::TensorShape inputTensorShape({ 1, 3, 224, 224 }); + return armnn::test::ClassifierInferenceTestMain<ImageNetDatabase, armnnCaffeParser::ICaffeParser>( + argc, argv, "ResNet_50_ilsvrc15_model.caffemodel", true, + "data", "prob", { 0, 1 }, + [&imageSet](const char* dataDir) { return ImageNetDatabase(dataDir, 224, 224, imageSet); }, + &inputTensorShape); +} diff --git a/tests/CaffeResNet-Armnn/Validation.txt b/tests/CaffeResNet-Armnn/Validation.txt new file mode 100644 index 0000000000..b3c5de80b7 --- /dev/null +++ b/tests/CaffeResNet-Armnn/Validation.txt @@ -0,0 +1,2000 @@ +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 
+21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 
+2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 +21 +2 diff --git a/tests/CaffeSqueezeNet1_0-Armnn/CaffeSqueezeNet1_0-Armnn.cpp b/tests/CaffeSqueezeNet1_0-Armnn/CaffeSqueezeNet1_0-Armnn.cpp new file mode 100644 index 0000000000..f0b48836f1 --- /dev/null +++ b/tests/CaffeSqueezeNet1_0-Armnn/CaffeSqueezeNet1_0-Armnn.cpp @@ -0,0 +1,15 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "../InferenceTest.hpp" +#include "../ImageNetDatabase.hpp" +#include "armnnCaffeParser/ICaffeParser.hpp" + +int main(int argc, char* argv[]) +{ + return armnn::test::ClassifierInferenceTestMain<ImageNetDatabase, armnnCaffeParser::ICaffeParser>( + argc, argv, "squeezenet.caffemodel", true, + "data", "output", { 0 }, + [](const char* dataDir) { return ImageNetDatabase(dataDir); }); +} diff --git a/tests/CaffeVGG-Armnn/CaffeVGG-Armnn.cpp b/tests/CaffeVGG-Armnn/CaffeVGG-Armnn.cpp new file mode 100644 index 0000000000..e7fc55c7e7 --- /dev/null +++ b/tests/CaffeVGG-Armnn/CaffeVGG-Armnn.cpp @@ -0,0 +1,17 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "../InferenceTest.hpp" +#include "../ImageNetDatabase.hpp" +#include "armnnCaffeParser/ICaffeParser.hpp" + +int main(int argc, char* argv[]) +{ + armnn::TensorShape inputTensorShape({ 1, 3, 224, 224 }); + return armnn::test::ClassifierInferenceTestMain<ImageNetDatabase, armnnCaffeParser::ICaffeParser>( + argc, argv, "VGG_CNN_S.caffemodel", true, + "input", "prob", { 0 }, + [](const char* dataDir) { return ImageNetDatabase(dataDir, 224, 224); }, + &inputTensorShape); +} diff --git a/tests/CaffeVGG-Armnn/Validation.txt b/tests/CaffeVGG-Armnn/Validation.txt new file mode 100644 index 0000000000..cb95f050e2 --- /dev/null +++ b/tests/CaffeVGG-Armnn/Validation.txt @@ -0,0 +1,1000 @@ +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 
+2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 diff --git a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp new file mode 100644 index 0000000000..af60be95ec --- /dev/null +++ b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp @@ -0,0 +1,39 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "../YoloInferenceTest.hpp" +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "armnn/TypesUtils.hpp" + +int main(int argc, char* argv[]) +{ + armnn::TensorShape inputTensorShape{ { 1, 3, YoloImageHeight, YoloImageWidth } }; + + using YoloInferenceModel = InferenceModel<armnnCaffeParser::ICaffeParser, + float>; + + return InferenceTestMain(argc, argv, { 0 }, + [&inputTensorShape]() + { + return make_unique<YoloTestCaseProvider<YoloInferenceModel>>( + [&] + (typename YoloInferenceModel::CommandLineOptions modelOptions) + { + if (!ValidateDirectory(modelOptions.m_ModelDir)) + { + return std::unique_ptr<YoloInferenceModel>(); + } + + typename YoloInferenceModel::Params modelParams; + modelParams.m_ModelPath = modelOptions.m_ModelDir + "yolov1_tiny_voc2007_model.caffemodel"; + modelParams.m_InputBinding = "data"; + modelParams.m_OutputBinding = "fc12"; + modelParams.m_InputTensorShape = &inputTensorShape; + modelParams.m_IsModelBinary = true; + modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice; + + return std::make_unique<YoloInferenceModel>(modelParams); + }); + }); +} diff --git a/tests/Cifar10Database.cpp b/tests/Cifar10Database.cpp new file mode 100644 index 0000000000..f3bf68fd45 --- /dev/null +++ b/tests/Cifar10Database.cpp @@ -0,0 +1,84 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
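+//
+// Each record in test_batch.bin is 1 + 3 * 32 * 32 bytes: one label byte followed by the red,
+// green and blue planes stored back to back (planar layout). The byte at offset 1 is therefore
+// the red value of pixel (0, 0) and the byte at offset 1 + 32 * 32 is its green value. When the
+// database is constructed with rgbPack == true, GetTestCaseData repacks the planar data into
+// interleaved R, G, B triplets; otherwise the planar layout is preserved.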
+// +#include "Cifar10Database.hpp" + +#include <boost/numeric/conversion/cast.hpp> +#include <boost/log/trivial.hpp> +#include <fstream> +#include <vector> + +constexpr unsigned int g_kCifar10ImageByteSize = 1 + 3 * 32 * 32; + +Cifar10Database::Cifar10Database(const std::string& binaryFileDirectory, bool rgbPack) + : m_BinaryDirectory(binaryFileDirectory), m_RgbPack(rgbPack) +{ +} + +std::unique_ptr<Cifar10Database::TTestCaseData> Cifar10Database::GetTestCaseData(unsigned int testCaseId) +{ + std::vector<unsigned char> I(g_kCifar10ImageByteSize); + + std::string fullpath = m_BinaryDirectory + std::string("test_batch.bin"); + + std::ifstream fileStream(fullpath, std::ios::binary); + if (!fileStream.is_open()) + { + BOOST_LOG_TRIVIAL(fatal) << "Failed to load " << fullpath; + return nullptr; + } + + fileStream.seekg(testCaseId * g_kCifar10ImageByteSize, std::ios_base::beg); + fileStream.read(reinterpret_cast<char*>(&I[0]), g_kCifar10ImageByteSize); + + if (!fileStream.good()) + { + BOOST_LOG_TRIVIAL(fatal) << "Failed to read " << fullpath; + return nullptr; + } + + + std::vector<float> inputImageData; + inputImageData.resize(g_kCifar10ImageByteSize - 1); + + unsigned int step; + unsigned int countR_o; + unsigned int countG_o; + unsigned int countB_o; + unsigned int countR = 1; + unsigned int countG = 1 + 32 * 32; + unsigned int countB = 1 + 2 * 32 * 32; + + if (m_RgbPack) + { + countR_o = 0; + countG_o = 1; + countB_o = 2; + step = 3; + } + else + { + countR_o = 0; + countG_o = 32 * 32; + countB_o = 2 * 32 * 32; + step = 1; + } + + for (unsigned int h = 0; h < 32; h++) + { + for (unsigned int w = 0; w < 32; w++) + { + inputImageData[countR_o] = boost::numeric_cast<float>(I[countR++]); + inputImageData[countG_o] = boost::numeric_cast<float>(I[countG++]); + inputImageData[countB_o] = boost::numeric_cast<float>(I[countB++]); + + countR_o += step; + countG_o += step; + countB_o += step; + } + } + + const unsigned int label = boost::numeric_cast<unsigned int>(I[0]); + return std::make_unique<TTestCaseData>(label, std::move(inputImageData)); +} diff --git a/tests/Cifar10Database.hpp b/tests/Cifar10Database.hpp new file mode 100644 index 0000000000..a4998cee1d --- /dev/null +++ b/tests/Cifar10Database.hpp @@ -0,0 +1,23 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "ClassifierTestCaseData.hpp" + +#include <string> +#include <memory> + +class Cifar10Database +{ +public: + using TTestCaseData = ClassifierTestCaseData<float>; + + explicit Cifar10Database(const std::string& binaryFileDirectory, bool rgbPack = false); + std::unique_ptr<TTestCaseData> GetTestCaseData(unsigned int testCaseId); + +private: + std::string m_BinaryDirectory; + bool m_RgbPack; +}; diff --git a/tests/ClassifierTestCaseData.hpp b/tests/ClassifierTestCaseData.hpp new file mode 100644 index 0000000000..6dbced28d6 --- /dev/null +++ b/tests/ClassifierTestCaseData.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
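+//
+// Simple holder pairing the expected label of a test case with the preprocessed input image
+// (a flat vector of DataType values) that will be fed to the network under test.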
+// +#pragma once + +#include <vector> + +template <typename DataType> +class ClassifierTestCaseData +{ +public: + ClassifierTestCaseData(unsigned int label, std::vector<DataType> inputImage) + : m_Label(label) + , m_InputImage(std::move(inputImage)) + { + } + + const unsigned int m_Label; + std::vector<DataType> m_InputImage; +}; diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp new file mode 100644 index 0000000000..5e9e6532cf --- /dev/null +++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp @@ -0,0 +1,244 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "armnn/ArmNN.hpp" +#if defined(ARMNN_CAFFE_PARSER) +#include "armnnCaffeParser/ICaffeParser.hpp" +#endif +#include "Logging.hpp" +#include "../InferenceTest.hpp" + +#include <boost/program_options.hpp> +#include <boost/algorithm/string/split.hpp> +#include <boost/algorithm/string/classification.hpp> + +#include <iostream> +#include <fstream> + +namespace +{ + +template<typename T, typename TParseElementFunc> +std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc) +{ + std::vector<T> result; + // Process line-by-line + std::string line; + while (std::getline(stream, line)) + { + std::vector<std::string> tokens; + boost::split(tokens, line, boost::algorithm::is_any_of("\t ,;:"), boost::token_compress_on); + for (const std::string& token : tokens) + { + if (!token.empty()) // See https://stackoverflow.com/questions/10437406/ + { + try + { + result.push_back(parseElementFunc(token)); + } + catch (const std::exception&) + { + BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. It has been ignored."; + } + } + } + } + + return result; +} + +} + +template<typename T> +std::vector<T> ParseArray(std::istream& stream); + +template<> +std::vector<float> ParseArray(std::istream& stream) +{ + return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); }); +} + +template<> +std::vector<unsigned int> ParseArray(std::istream& stream) +{ + return ParseArrayImpl<unsigned int>(stream, + [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); }); +} + +void PrintArray(const std::vector<float>& v) +{ + for (size_t i = 0; i < v.size(); i++) + { + printf("%f ", v[i]); + } + printf("\n"); +} + +template<typename TParser, typename TDataType> +int MainImpl(const char* modelPath, bool isModelBinary, armnn::Compute computeDevice, + const char* inputName, const armnn::TensorShape* inputTensorShape, const char* inputTensorDataFilePath, + const char* outputName) +{ + // Load input tensor + std::vector<TDataType> input; + { + std::ifstream inputTensorFile(inputTensorDataFilePath); + if (!inputTensorFile.good()) + { + BOOST_LOG_TRIVIAL(fatal) << "Failed to load input tensor data file from " << inputTensorDataFilePath; + return 1; + } + input = ParseArray<TDataType>(inputTensorFile); + } + + try + { + // Create an InferenceModel, which will parse the model and load it into an IRuntime + typename InferenceModel<TParser, TDataType>::Params params; + params.m_ModelPath = modelPath; + params.m_IsModelBinary = isModelBinary; + params.m_ComputeDevice = computeDevice; + params.m_InputBinding = inputName; + params.m_InputTensorShape = inputTensorShape; + params.m_OutputBinding = outputName; + InferenceModel<TParser, TDataType> model(params); + + // Execute the model + std::vector<TDataType> output(model.GetOutputSize()); + model.Run(input, 
output); + + // Print the output tensor + PrintArray(output); + } + catch (armnn::Exception const& e) + { + BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what(); + return 1; + } + + return 0; +} + +int main(int argc, char* argv[]) +{ + // Configure logging for both the ARMNN library and this test program +#ifdef NDEBUG + armnn::LogSeverity level = armnn::LogSeverity::Info; +#else + armnn::LogSeverity level = armnn::LogSeverity::Debug; +#endif + armnn::ConfigureLogging(true, true, level); + armnnUtils::ConfigureLogging(boost::log::core::get().get(), true, true, level); + + // Configure boost::program_options for command-line parsing + namespace po = boost::program_options; + + std::string modelFormat; + std::string modelPath; + std::string inputName; + std::string inputTensorShapeStr; + std::string inputTensorDataFilePath; + std::string outputName; + armnn::Compute computeDevice; + + po::options_description desc("Options"); + try + { + desc.add_options() + ("help", "Display usage information") + ("model-format,f", po::value(&modelFormat)->required(), + "caffe-binary, caffe-text, tensorflow-binary or tensorflow-text.") + ("model-path,m", po::value(&modelPath)->required(), "Path to model file, e.g. .caffemodel, .prototxt") + ("compute,c", po::value<armnn::Compute>(&computeDevice)->required(), + "Which device to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc") + ("input-name,i", po::value(&inputName)->required(), "Identifier of the input tensor in the network.") + ("input-tensor-shape,s", po::value(&inputTensorShapeStr), + "The shape of the input tensor in the network as a flat array of integers separated by whitespace. " + "This parameter is optional, depending on the network.") + ("input-tensor-data,d", po::value(&inputTensorDataFilePath)->required(), + "Path to a file containing the input data as a flat array separated by whitespace.") + ("output-name,o", po::value(&outputName)->required(), "Identifier of the output tensor in the network."); + } + catch (const std::exception& e) + { + // Coverity points out that default_value(...) can throw a bad_lexical_cast, + // and that desc.add_options() can throw boost::io::too_few_args. + // They really won't in any of these cases. + BOOST_ASSERT_MSG(false, "Caught unexpected exception"); + BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what(); + return 1; + } + + // Parse the command-line + po::variables_map vm; + try + { + po::store(po::parse_command_line(argc, argv, desc), vm); + + if (vm.count("help") || argc <= 1) + { + std::cout << "Executes a neural network model using the provided input tensor. " << std::endl; + std::cout << "Prints the resulting output tensor." << std::endl; + std::cout << std::endl; + std::cout << desc << std::endl; + return 1; + } + + po::notify(vm); + } + catch (po::error& e) + { + std::cerr << e.what() << std::endl << std::endl; + std::cerr << desc << std::endl; + return 1; + } + + // Parse model binary flag from the model-format string we got from the command-line + bool isModelBinary; + if (modelFormat.find("bin") != std::string::npos) + { + isModelBinary = true; + } + else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos) + { + isModelBinary = false; + } + else + { + BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'"; + return 1; + } + + // Parse input tensor shape from the string we got from the command-line. 
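+    // Illustrative example (the executable name and paths here are hypothetical):
+    //   ExecuteNetwork -f caffe-binary -m /path/to/squeezenet.caffemodel -c GpuAcc \
+    //                  -i data -o output -d /path/to/input.txt -s "1 3 227 227"
+    // The -s value is tokenized by ParseArray<unsigned int> and the resulting dimensions are
+    // wrapped in an armnn::TensorShape below.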
+ std::unique_ptr<armnn::TensorShape> inputTensorShape; + if (!inputTensorShapeStr.empty()) + { + std::stringstream ss(inputTensorShapeStr); + std::vector<unsigned int> dims = ParseArray<unsigned int>(ss); + inputTensorShape = std::make_unique<armnn::TensorShape>(dims.size(), dims.data()); + } + + // Forward to implementation based on the parser type + if (modelFormat.find("caffe") != std::string::npos) + { +#if defined(ARMNN_CAFFE_PARSER) + return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice, + inputName.c_str(), inputTensorShape.get(), inputTensorDataFilePath.c_str(), outputName.c_str()); +#else + BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support."; + return 1; +#endif + } + else if (modelFormat.find("tensorflow") != std::string::npos) + { + BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support."; + return 1; + } + else + { + BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << + "'. Please include 'caffe' or 'tensorflow'"; + return 1; + } +} diff --git a/tests/ImageNetDatabase.cpp b/tests/ImageNetDatabase.cpp new file mode 100644 index 0000000000..0a235c9a3e --- /dev/null +++ b/tests/ImageNetDatabase.cpp @@ -0,0 +1,54 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "InferenceTestImage.hpp" +#include "ImageNetDatabase.hpp" + +#include <boost/numeric/conversion/cast.hpp> +#include <boost/log/trivial.hpp> +#include <boost/assert.hpp> +#include <boost/format.hpp> + +#include <iostream> +#include <fcntl.h> +#include <array> + +const std::vector<ImageSet> g_DefaultImageSet = +{ + {"shark.jpg", 2} +}; + +ImageNetDatabase::ImageNetDatabase(const std::string& binaryFileDirectory, unsigned int width, unsigned int height, + const std::vector<ImageSet>& imageSet) +: m_BinaryDirectory(binaryFileDirectory) +, m_Height(height) +, m_Width(width) +, m_ImageSet(imageSet.empty() ? g_DefaultImageSet : imageSet) +{ +} + +std::unique_ptr<ImageNetDatabase::TTestCaseData> ImageNetDatabase::GetTestCaseData(unsigned int testCaseId) +{ + testCaseId = testCaseId % boost::numeric_cast<unsigned int>(m_ImageSet.size()); + const ImageSet& imageSet = m_ImageSet[testCaseId]; + const std::string fullPath = m_BinaryDirectory + imageSet.first; + FILE* file = fopen(fullPath.c_str(), "rb"); + + if (file == nullptr) + { + BOOST_LOG_TRIVIAL(fatal) << "Failed to load " << fullPath; + return nullptr; + } + + InferenceTestImage image(fullPath.c_str()); + image.Resize(m_Width, m_Height); + + // The model expects image data in BGR format + std::vector<float> inputImageData = GetImageDataInArmNnLayoutAsFloatsSubtractingMean(ImageChannelLayout::Bgr, + image, m_MeanBgr); + + // list of labels: https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a + const unsigned int label = imageSet.second; + return std::make_unique<TTestCaseData>(label, std::move(inputImageData)); +} diff --git a/tests/ImageNetDatabase.hpp b/tests/ImageNetDatabase.hpp new file mode 100644 index 0000000000..cd990c458a --- /dev/null +++ b/tests/ImageNetDatabase.hpp @@ -0,0 +1,37 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#pragma once + +#include "ClassifierTestCaseData.hpp" + +#include <array> +#include <string> +#include <vector> +#include <memory> + +using ImageSet = std::pair<const std::string, unsigned int>; + +class ImageNetDatabase +{ +public: + using TTestCaseData = ClassifierTestCaseData<float>; + + explicit ImageNetDatabase(const std::string& binaryFileDirectory, + unsigned int width = 227, + unsigned int height = 227, + const std::vector<ImageSet>& imageSet = std::vector<ImageSet>()); + std::unique_ptr<TTestCaseData> GetTestCaseData(unsigned int testCaseId); + +private: + unsigned int GetNumImageElements() const { return 3 * m_Width * m_Height; } + unsigned int GetNumImageBytes() const { return 4 * GetNumImageElements(); } + + std::string m_BinaryDirectory; + unsigned int m_Height; + unsigned int m_Width; + //mean value of the database [B, G, R] + const std::array<float, 3> m_MeanBgr = {{104.007965f, 116.669472f, 122.675102f}}; + const std::vector<ImageSet> m_ImageSet; +};
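+
+// Example usage (illustrative only; the data directory and image list are hypothetical, and the
+// Caffe test programs normally construct this database inside the lambda they pass to
+// ClassifierInferenceTestMain):
+//
+//   std::vector<ImageSet> images = { { "shark.jpg", 2 } };
+//   ImageNetDatabase database("/path/to/data/", 224, 224, images);
+//   std::unique_ptr<ImageNetDatabase::TTestCaseData> data = database.GetTestCaseData(0);
+//   // data->m_Label is the expected class index; data->m_InputImage holds the image as CHW
+//   // floats with the BGR mean already subtracted.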
\ No newline at end of file diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp new file mode 100644 index 0000000000..c390ccdc2f --- /dev/null +++ b/tests/InferenceModel.hpp @@ -0,0 +1,149 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "armnn/ArmNN.hpp" + +#include <boost/log/trivial.hpp> +#include <boost/format.hpp> +#include <boost/program_options.hpp> + +#include <map> +#include <string> + +template<typename TContainer> +inline armnn::InputTensors MakeInputTensors(const std::pair<armnn::LayerBindingId, armnn::TensorInfo>& input, + const TContainer& inputTensorData) +{ + if (inputTensorData.size() != input.second.GetNumElements()) + { + throw armnn::Exception(boost::str(boost::format("Input tensor has incorrect size. Expected %1% elements " + "but got %2%.") % input.second.GetNumElements() % inputTensorData.size())); + } + return { { input.first, armnn::ConstTensor(input.second, inputTensorData.data()) } }; +} + +template<typename TContainer> +inline armnn::OutputTensors MakeOutputTensors(const std::pair<armnn::LayerBindingId, armnn::TensorInfo>& output, + TContainer& outputTensorData) +{ + if (outputTensorData.size() != output.second.GetNumElements()) + { + throw armnn::Exception("Output tensor has incorrect size"); + } + return { { output.first, armnn::Tensor(output.second, outputTensorData.data()) } }; +} + +template <typename IParser, typename TDataType> +class InferenceModel +{ +public: + using DataType = TDataType; + + struct CommandLineOptions + { + std::string m_ModelDir; + armnn::Compute m_ComputeDevice; + }; + + static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options) + { + namespace po = boost::program_options; + + desc.add_options() + ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(), + "Path to directory containing model files (.caffemodel/.prototxt)") + ("compute,c", po::value<armnn::Compute>(&options.m_ComputeDevice)->default_value(armnn::Compute::CpuAcc), + "Which device to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc"); + } + + struct Params + { + std::string m_ModelPath; + std::string m_InputBinding; + std::string m_OutputBinding; + const armnn::TensorShape* m_InputTensorShape; + armnn::Compute m_ComputeDevice; + bool m_IsModelBinary; + + Params() + : m_InputTensorShape(nullptr) + , m_ComputeDevice(armnn::Compute::CpuRef) + , m_IsModelBinary(true) + { + } + }; + + + InferenceModel(const Params& params) + : m_Runtime(armnn::IRuntime::Create(params.m_ComputeDevice)) + { + const std::string& modelPath = params.m_ModelPath; + + // Create a network from a file on disk + auto parser(IParser::Create()); + + std::map<std::string, armnn::TensorShape> inputShapes; + if (params.m_InputTensorShape) + { + inputShapes[params.m_InputBinding] = *params.m_InputTensorShape; + } + std::vector<std::string> requestedOutputs{ params.m_OutputBinding }; + + // Handle text and binary input differently by calling the corresponding parser function + armnn::INetworkPtr network = (params.m_IsModelBinary ? 
+ parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) : + parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs)); + + m_InputBindingInfo = parser->GetNetworkInputBindingInfo(params.m_InputBinding); + m_OutputBindingInfo = parser->GetNetworkOutputBindingInfo(params.m_OutputBinding); + + armnn::IOptimizedNetworkPtr optNet = + armnn::Optimize(*network, m_Runtime->GetDeviceSpec()); + + // Load the network into the runtime. + armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet)); + if (ret == armnn::Status::Failure) + { + throw armnn::Exception("IRuntime::LoadNetwork failed"); + } + } + + unsigned int GetOutputSize() const + { + return m_OutputBindingInfo.second.GetNumElements(); + } + + void Run(const std::vector<TDataType>& input, std::vector<TDataType>& output) + { + BOOST_ASSERT(output.size() == GetOutputSize()); + armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier, + MakeInputTensors(input), + MakeOutputTensors(output)); + if (ret == armnn::Status::Failure) + { + throw armnn::Exception("IRuntime::EnqueueWorkload failed"); + } + } + +private: + template<typename TContainer> + armnn::InputTensors MakeInputTensors(const TContainer& inputTensorData) + { + return ::MakeInputTensors(m_InputBindingInfo, inputTensorData); + } + + template<typename TContainer> + armnn::OutputTensors MakeOutputTensors(TContainer& outputTensorData) + { + return ::MakeOutputTensors(m_OutputBindingInfo, outputTensorData); + } + + armnn::NetworkId m_NetworkIdentifier; + armnn::IRuntimePtr m_Runtime; + + std::pair<armnn::LayerBindingId, armnn::TensorInfo> m_InputBindingInfo; + std::pair<armnn::LayerBindingId, armnn::TensorInfo> m_OutputBindingInfo; +}; diff --git a/tests/InferenceTest.cpp b/tests/InferenceTest.cpp new file mode 100644 index 0000000000..55616798e2 --- /dev/null +++ b/tests/InferenceTest.cpp @@ -0,0 +1,236 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "InferenceTest.hpp" + +#include <boost/algorithm/string.hpp> +#include <boost/numeric/conversion/cast.hpp> +#include <boost/log/trivial.hpp> +#include <boost/filesystem/path.hpp> +#include <boost/assert.hpp> +#include <boost/format.hpp> +#include <boost/program_options.hpp> +#include <boost/filesystem/operations.hpp> + +#include <fstream> +#include <iostream> +#include <iomanip> +#include <array> + +using namespace std; +using namespace std::chrono; +using namespace armnn::test; + +namespace armnn +{ +namespace test +{ + +/// Parse the command line of an ArmNN (or referencetests) inference test program. +/// \return false if any error occurred during options processing, otherwise true +bool ParseCommandLine(int argc, char** argv, IInferenceTestCaseProvider& testCaseProvider, + InferenceTestOptions& outParams) +{ + namespace po = boost::program_options; + + std::string computeDeviceStr; + + po::options_description desc("Options"); + + try + { + // Add generic options needed for all inference tests + desc.add_options() + ("help", "Display help messages") + ("iterations,i", po::value<unsigned int>(&outParams.m_IterationCount)->default_value(0), + "Sets the number number of inferences to perform. 
If unset, a default number will be ran.") + ("inference-times-file", po::value<std::string>(&outParams.m_InferenceTimesFile)->default_value(""), + "If non-empty, each individual inference time will be recorded and output to this file"); + + // Add options specific to the ITestCaseProvider + testCaseProvider.AddCommandLineOptions(desc); + } + catch (const std::exception& e) + { + // Coverity points out that default_value(...) can throw a bad_lexical_cast, + // and that desc.add_options() can throw boost::io::too_few_args. + // They really won't in any of these cases. + BOOST_ASSERT_MSG(false, "Caught unexpected exception"); + std::cerr << "Fatal internal error: " << e.what() << std::endl; + return false; + } + + po::variables_map vm; + + try + { + po::store(po::parse_command_line(argc, argv, desc), vm); + + if (vm.count("help")) + { + std::cout << desc << std::endl; + return false; + } + + po::notify(vm); + } + catch (po::error& e) + { + std::cerr << e.what() << std::endl << std::endl; + std::cerr << desc << std::endl; + return false; + } + + if (!testCaseProvider.ProcessCommandLineOptions()) + { + return false; + } + + return true; +} + +bool ValidateDirectory(std::string& dir) +{ + if (dir[dir.length() - 1] != '/') + { + dir += "/"; + } + + if (!boost::filesystem::exists(dir)) + { + std::cerr << "Given directory " << dir << " does not exist" << std::endl; + return false; + } + + return true; +} + +bool InferenceTest(const InferenceTestOptions& params, + const std::vector<unsigned int>& defaultTestCaseIds, + IInferenceTestCaseProvider& testCaseProvider) +{ +#if !defined (NDEBUG) + if (params.m_IterationCount > 0) // If just running a few select images then don't bother to warn + { + BOOST_LOG_TRIVIAL(warning) << "Performance test running in DEBUG build - results may be inaccurate."; + } +#endif + + double totalTime = 0; + unsigned int nbProcessed = 0; + bool success = true; + + // Open the file to write inference times to, if needed + ofstream inferenceTimesFile; + const bool recordInferenceTimes = !params.m_InferenceTimesFile.empty(); + if (recordInferenceTimes) + { + inferenceTimesFile.open(params.m_InferenceTimesFile.c_str(), ios_base::trunc | ios_base::out); + if (!inferenceTimesFile.good()) + { + BOOST_LOG_TRIVIAL(error) << "Failed to open inference times file for writing: " + << params.m_InferenceTimesFile; + return false; + } + } + + // Run a single test case to 'warm-up' the model. The first one can sometimes take up to 10x longer + std::unique_ptr<IInferenceTestCase> warmupTestCase = testCaseProvider.GetTestCase(0); + if (warmupTestCase == nullptr) + { + BOOST_LOG_TRIVIAL(error) << "Failed to load test case"; + return false; + } + + try + { + warmupTestCase->Run(); + } + catch (const TestFrameworkException& testError) + { + BOOST_LOG_TRIVIAL(error) << testError.what(); + return false; + } + + const unsigned int nbTotalToProcess = params.m_IterationCount > 0 ? params.m_IterationCount + : boost::numeric_cast<unsigned int>(defaultTestCaseIds.size()); + + for (; nbProcessed < nbTotalToProcess; nbProcessed++) + { + const unsigned int testCaseId = params.m_IterationCount > 0 ? 
nbProcessed : defaultTestCaseIds[nbProcessed]; + std::unique_ptr<IInferenceTestCase> testCase = testCaseProvider.GetTestCase(testCaseId); + + if (testCase == nullptr) + { + BOOST_LOG_TRIVIAL(error) << "Failed to load test case"; + return false; + } + + time_point<high_resolution_clock> predictStart; + time_point<high_resolution_clock> predictEnd; + + TestCaseResult result = TestCaseResult::Ok; + + try + { + predictStart = high_resolution_clock::now(); + + testCase->Run(); + + predictEnd = high_resolution_clock::now(); + + // duration<double> will convert the time difference into seconds as a double by default. + double timeTakenS = duration<double>(predictEnd - predictStart).count(); + totalTime += timeTakenS; + + // Output inference times if needed + if (recordInferenceTimes) + { + inferenceTimesFile << testCaseId << " " << (timeTakenS * 1000.0) << std::endl; + } + + result = testCase->ProcessResult(params); + + } + catch (const TestFrameworkException& testError) + { + BOOST_LOG_TRIVIAL(error) << testError.what(); + result = TestCaseResult::Abort; + } + + switch (result) + { + case TestCaseResult::Ok: + break; + case TestCaseResult::Abort: + return false; + case TestCaseResult::Failed: + // This test failed so we will fail the entire program eventually, but keep going for now. + success = false; + break; + default: + BOOST_ASSERT_MSG(false, "Unexpected TestCaseResult"); + return false; + } + } + + const double averageTimePerTestCaseMs = totalTime / nbProcessed * 1000.0f; + + BOOST_LOG_TRIVIAL(info) << std::fixed << std::setprecision(3) << + "Total time for " << nbProcessed << " test cases: " << totalTime << " seconds"; + BOOST_LOG_TRIVIAL(info) << std::fixed << std::setprecision(3) << + "Average time per test case: " << averageTimePerTestCaseMs << " ms"; + + if (!success) + { + BOOST_LOG_TRIVIAL(error) << "One or more test cases failed"; + return false; + } + + return testCaseProvider.OnInferenceTestFinished(); +} + +} // namespace test + +} // namespace armnn diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp new file mode 100644 index 0000000000..5f53c06a88 --- /dev/null +++ b/tests/InferenceTest.hpp @@ -0,0 +1,197 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include "armnn/ArmNN.hpp" +#include "armnn/TypesUtils.hpp" +#include <Logging.hpp> + +#include <boost/log/core/core.hpp> +#include <boost/program_options.hpp> + +namespace armnn +{ + +inline std::istream& operator>>(std::istream& in, armnn::Compute& compute) +{ + std::string token; + in >> token; + compute = armnn::ParseComputeDevice(token.c_str()); + if (compute == armnn::Compute::Undefined) + { + in.setstate(std::ios_base::failbit); + throw boost::program_options::validation_error(boost::program_options::validation_error::invalid_option_value); + } + return in; +} + +namespace test +{ + +class TestFrameworkException : public Exception +{ +public: + using Exception::Exception; +}; + +struct InferenceTestOptions +{ + unsigned int m_IterationCount; + std::string m_InferenceTimesFile; + + InferenceTestOptions() + : m_IterationCount(0) + {} +}; + +enum class TestCaseResult +{ + /// The test completed without any errors. + Ok, + /// The test failed (e.g. the prediction didn't match the validation file). + /// This will eventually fail the whole program but the remaining test cases will still be run. + Failed, + /// The test failed with a fatal error. The remaining tests will not be run. 
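+    /// For example, a TestFrameworkException thrown while a test case is running maps to this result.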
+ Abort +}; + +class IInferenceTestCase +{ +public: + virtual ~IInferenceTestCase() {} + + virtual void Run() = 0; + virtual TestCaseResult ProcessResult(const InferenceTestOptions& options) = 0; +}; + +class IInferenceTestCaseProvider +{ +public: + virtual ~IInferenceTestCaseProvider() {} + + virtual void AddCommandLineOptions(boost::program_options::options_description& options) {}; + virtual bool ProcessCommandLineOptions() { return true; }; + virtual std::unique_ptr<IInferenceTestCase> GetTestCase(unsigned int testCaseId) = 0; + virtual bool OnInferenceTestFinished() { return true; }; +}; + +template <typename TModel> +class InferenceModelTestCase : public IInferenceTestCase +{ +public: + InferenceModelTestCase(TModel& model, + unsigned int testCaseId, + std::vector<typename TModel::DataType> modelInput, + unsigned int outputSize) + : m_Model(model) + , m_TestCaseId(testCaseId) + , m_Input(std::move(modelInput)) + { + m_Output.resize(outputSize); + } + + virtual void Run() override + { + m_Model.Run(m_Input, m_Output); + } + +protected: + unsigned int GetTestCaseId() const { return m_TestCaseId; } + const std::vector<typename TModel::DataType>& GetOutput() const { return m_Output; } + +private: + TModel& m_Model; + unsigned int m_TestCaseId; + std::vector<typename TModel::DataType> m_Input; + std::vector<typename TModel::DataType> m_Output; +}; + +template <typename TTestCaseDatabase, typename TModel> +class ClassifierTestCase : public InferenceModelTestCase<TModel> +{ +public: + ClassifierTestCase(int& numInferencesRef, + int& numCorrectInferencesRef, + const std::vector<unsigned int>& validationPredictions, + std::vector<unsigned int>* validationPredictionsOut, + TModel& model, + unsigned int testCaseId, + unsigned int label, + std::vector<typename TModel::DataType> modelInput); + + virtual TestCaseResult ProcessResult(const InferenceTestOptions& params) override; + +private: + unsigned int m_Label; + /// These fields reference the corresponding member in the ClassifierTestCaseProvider. 
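+    /// Each test case uses them to update the provider's shared accuracy counters and, when a
+    /// validation output file was requested, to append its prediction to the provider's list.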
+ /// @{ + int& m_NumInferencesRef; + int& m_NumCorrectInferencesRef; + const std::vector<unsigned int>& m_ValidationPredictions; + std::vector<unsigned int>* m_ValidationPredictionsOut; + /// @} +}; + +template <typename TDatabase, typename InferenceModel> +class ClassifierTestCaseProvider : public IInferenceTestCaseProvider +{ +public: + template <typename TConstructDatabaseCallable, typename TConstructModelCallable> + ClassifierTestCaseProvider(TConstructDatabaseCallable constructDatabase, TConstructModelCallable constructModel); + + virtual void AddCommandLineOptions(boost::program_options::options_description& options) override; + virtual bool ProcessCommandLineOptions() override; + virtual std::unique_ptr<IInferenceTestCase> GetTestCase(unsigned int testCaseId) override; + virtual bool OnInferenceTestFinished() override; + +private: + void ReadPredictions(); + + typename InferenceModel::CommandLineOptions m_ModelCommandLineOptions; + std::function<std::unique_ptr<InferenceModel>(typename InferenceModel::CommandLineOptions)> m_ConstructModel; + std::unique_ptr<InferenceModel> m_Model; + + std::string m_DataDir; + std::function<TDatabase(const char*)> m_ConstructDatabase; + std::unique_ptr<TDatabase> m_Database; + + int m_NumInferences; // Referenced by test cases + int m_NumCorrectInferences; // Referenced by test cases + + std::string m_ValidationFileIn; + std::vector<unsigned int> m_ValidationPredictions; // Referenced by test cases + + std::string m_ValidationFileOut; + std::vector<unsigned int> m_ValidationPredictionsOut; // Referenced by test cases +}; + +bool ParseCommandLine(int argc, char** argv, IInferenceTestCaseProvider& testCaseProvider, + InferenceTestOptions& outParams); + +bool ValidateDirectory(std::string& dir); + +bool InferenceTest(const InferenceTestOptions& params, + const std::vector<unsigned int>& defaultTestCaseIds, + IInferenceTestCaseProvider& testCaseProvider); + +template<typename TConstructTestCaseProvider> +int InferenceTestMain(int argc, + char* argv[], + const std::vector<unsigned int>& defaultTestCaseIds, + TConstructTestCaseProvider constructTestCaseProvider); + +template<typename TDatabase, + typename TParser, + typename TConstructDatabaseCallable> +int ClassifierInferenceTestMain(int argc, char* argv[], const char* modelFilename, bool isModelBinary, + const char* inputBindingName, const char* outputBindingName, + const std::vector<unsigned int>& defaultTestCaseIds, + TConstructDatabaseCallable constructDatabase, + const armnn::TensorShape* inputTensorShape = nullptr); + +} // namespace test +} // namespace armnn + +#include "InferenceTest.inl" diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl new file mode 100644 index 0000000000..64f97c1f87 --- /dev/null +++ b/tests/InferenceTest.inl @@ -0,0 +1,297 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "InferenceTest.hpp" + +#include "InferenceModel.hpp" + +#include <boost/algorithm/string.hpp> +#include <boost/numeric/conversion/cast.hpp> +#include <boost/log/trivial.hpp> +#include <boost/filesystem/path.hpp> +#include <boost/assert.hpp> +#include <boost/format.hpp> +#include <boost/program_options.hpp> +#include <boost/filesystem/operations.hpp> + +#include <fstream> +#include <iostream> +#include <iomanip> +#include <array> +#include <chrono> + +using namespace std; +using namespace std::chrono; +using namespace armnn::test; + +namespace armnn +{ +namespace test +{ + +template <typename TTestCaseDatabase, typename TModel> +ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase( + int& numInferencesRef, + int& numCorrectInferencesRef, + const std::vector<unsigned int>& validationPredictions, + std::vector<unsigned int>* validationPredictionsOut, + TModel& model, + unsigned int testCaseId, + unsigned int label, + std::vector<typename TModel::DataType> modelInput) + : InferenceModelTestCase<TModel>(model, testCaseId, std::move(modelInput), model.GetOutputSize()) + , m_Label(label) + , m_NumInferencesRef(numInferencesRef) + , m_NumCorrectInferencesRef(numCorrectInferencesRef) + , m_ValidationPredictions(validationPredictions) + , m_ValidationPredictionsOut(validationPredictionsOut) +{ +} + +template <typename TTestCaseDatabase, typename TModel> +TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(const InferenceTestOptions& params) +{ + auto& output = this->GetOutput(); + const auto testCaseId = this->GetTestCaseId(); + + const unsigned int prediction = boost::numeric_cast<unsigned int>( + std::distance(output.begin(), std::max_element(output.begin(), output.end()))); + + // If we're just running the defaultTestCaseIds, each one must be classified correctly + if (params.m_IterationCount == 0 && prediction != m_Label) + { + BOOST_LOG_TRIVIAL(error) << "Prediction for test case " << testCaseId << " (" << prediction << ")" << + " is incorrect (should be " << m_Label << ")"; + return TestCaseResult::Failed; + } + + // If a validation file was provided as input, check that the prediction matches + if (!m_ValidationPredictions.empty() && prediction != m_ValidationPredictions[testCaseId]) + { + BOOST_LOG_TRIVIAL(error) << "Prediction for test case " << testCaseId << " (" << prediction << ")" << + " doesn't match the prediction in the validation file (" << m_ValidationPredictions[testCaseId] << ")"; + return TestCaseResult::Failed; + } + + // If a validation file was requested as output, store the predictions + if (m_ValidationPredictionsOut) + { + m_ValidationPredictionsOut->push_back(prediction); + } + + // Update accuracy stats + m_NumInferencesRef++; + if (prediction == m_Label) + { + m_NumCorrectInferencesRef++; + } + + return TestCaseResult::Ok; +} + +template <typename TDatabase, typename InferenceModel> +template <typename TConstructDatabaseCallable, typename TConstructModelCallable> +ClassifierTestCaseProvider<TDatabase, InferenceModel>::ClassifierTestCaseProvider( + TConstructDatabaseCallable constructDatabase, TConstructModelCallable constructModel) + : m_ConstructModel(constructModel) + , m_ConstructDatabase(constructDatabase) + , m_NumInferences(0) + , m_NumCorrectInferences(0) +{ +} + +template <typename TDatabase, typename InferenceModel> +void ClassifierTestCaseProvider<TDatabase, InferenceModel>::AddCommandLineOptions( + boost::program_options::options_description& options) +{ + namespace po = boost::program_options; + + 
options.add_options() + ("validation-file-in", po::value<std::string>(&m_ValidationFileIn)->default_value(""), + "Reads expected predictions from the given file and confirms they match the actual predictions.") + ("validation-file-out", po::value<std::string>(&m_ValidationFileOut)->default_value(""), + "Predictions are saved to the given file for later use via --validation-file-in.") + ("data-dir,d", po::value<std::string>(&m_DataDir)->required(), + "Path to directory containing test data"); + + InferenceModel::AddCommandLineOptions(options, m_ModelCommandLineOptions); +} + +template <typename TDatabase, typename InferenceModel> +bool ClassifierTestCaseProvider<TDatabase, InferenceModel>::ProcessCommandLineOptions() +{ + if (!ValidateDirectory(m_DataDir)) + { + return false; + } + + ReadPredictions(); + + m_Model = m_ConstructModel(m_ModelCommandLineOptions); + if (!m_Model) + { + return false; + } + + m_Database = std::make_unique<TDatabase>(m_ConstructDatabase(m_DataDir.c_str())); + if (!m_Database) + { + return false; + } + + return true; +} + +template <typename TDatabase, typename InferenceModel> +std::unique_ptr<IInferenceTestCase> +ClassifierTestCaseProvider<TDatabase, InferenceModel>::GetTestCase(unsigned int testCaseId) +{ + std::unique_ptr<typename TDatabase::TTestCaseData> testCaseData = m_Database->GetTestCaseData(testCaseId); + if (testCaseData == nullptr) + { + return nullptr; + } + + return std::make_unique<ClassifierTestCase<TDatabase, InferenceModel>>( + m_NumInferences, + m_NumCorrectInferences, + m_ValidationPredictions, + m_ValidationFileOut.empty() ? nullptr : &m_ValidationPredictionsOut, + *m_Model, + testCaseId, + testCaseData->m_Label, + std::move(testCaseData->m_InputImage)); +} + +template <typename TDatabase, typename InferenceModel> +bool ClassifierTestCaseProvider<TDatabase, InferenceModel>::OnInferenceTestFinished() +{ + const double accuracy = boost::numeric_cast<double>(m_NumCorrectInferences) / + boost::numeric_cast<double>(m_NumInferences); + BOOST_LOG_TRIVIAL(info) << std::fixed << std::setprecision(3) << "Overall accuracy: " << accuracy; + + // If a validation file was requested as output, save the predictions to it + if (!m_ValidationFileOut.empty()) + { + std::ofstream validationFileOut(m_ValidationFileOut.c_str(), std::ios_base::trunc | std::ios_base::out); + if (validationFileOut.good()) + { + for (const unsigned int prediction : m_ValidationPredictionsOut) + { + validationFileOut << prediction << std::endl; + } + } + else + { + BOOST_LOG_TRIVIAL(error) << "Failed to open output validation file: " << m_ValidationFileOut; + return false; + } + } + + return true; +} + +template <typename TDatabase, typename InferenceModel> +void ClassifierTestCaseProvider<TDatabase, InferenceModel>::ReadPredictions() +{ + // Read expected predictions from the input validation file (if provided) + if (!m_ValidationFileIn.empty()) + { + std::ifstream validationFileIn(m_ValidationFileIn.c_str(), std::ios_base::in); + if (validationFileIn.good()) + { + while (!validationFileIn.eof()) + { + unsigned int i; + validationFileIn >> i; + m_ValidationPredictions.emplace_back(i); + } + } + else + { + throw armnn::Exception(boost::str(boost::format("Failed to open input validation file: %1%") + % m_ValidationFileIn)); + } + } +} + +template<typename TConstructTestCaseProvider> +int InferenceTestMain(int argc, + char* argv[], + const std::vector<unsigned int>& defaultTestCaseIds, + TConstructTestCaseProvider constructTestCaseProvider) +{ + // Configure logging for both the ARMNN 
library and this test program +#ifdef NDEBUG + armnn::LogSeverity level = armnn::LogSeverity::Info; +#else + armnn::LogSeverity level = armnn::LogSeverity::Debug; +#endif + armnn::ConfigureLogging(true, true, level); + armnnUtils::ConfigureLogging(boost::log::core::get().get(), true, true, level); + + try + { + std::unique_ptr<IInferenceTestCaseProvider> testCaseProvider = constructTestCaseProvider(); + if (!testCaseProvider) + { + return 1; + } + + InferenceTestOptions inferenceTestOptions; + if (!ParseCommandLine(argc, argv, *testCaseProvider, inferenceTestOptions)) + { + return 1; + } + + const bool success = InferenceTest(inferenceTestOptions, defaultTestCaseIds, *testCaseProvider); + return success ? 0 : 1; + } + catch (armnn::Exception const& e) + { + BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what(); + return 1; + } +} + +template<typename TDatabase, + typename TParser, + typename TConstructDatabaseCallable> +int ClassifierInferenceTestMain(int argc, char* argv[], const char* modelFilename, bool isModelBinary, + const char* inputBindingName, const char* outputBindingName, + const std::vector<unsigned int>& defaultTestCaseIds, + TConstructDatabaseCallable constructDatabase, + const armnn::TensorShape* inputTensorShape) +{ + return InferenceTestMain(argc, argv, defaultTestCaseIds, + [=] + () + { + using InferenceModel = InferenceModel<TParser, float>; + using TestCaseProvider = ClassifierTestCaseProvider<TDatabase, InferenceModel>; + + return make_unique<TestCaseProvider>(constructDatabase, + [&] + (typename InferenceModel::CommandLineOptions modelOptions) + { + if (!ValidateDirectory(modelOptions.m_ModelDir)) + { + return std::unique_ptr<InferenceModel>(); + } + + typename InferenceModel::Params modelParams; + modelParams.m_ModelPath = modelOptions.m_ModelDir + modelFilename; + modelParams.m_InputBinding = inputBindingName; + modelParams.m_OutputBinding = outputBindingName; + modelParams.m_InputTensorShape = inputTensorShape; + modelParams.m_IsModelBinary = isModelBinary; + modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice; + + return std::make_unique<InferenceModel>(modelParams); + }); + }); +} + +} // namespace test +} // namespace armnn diff --git a/tests/InferenceTestImage.cpp b/tests/InferenceTestImage.cpp new file mode 100644 index 0000000000..8fc6f12867 --- /dev/null +++ b/tests/InferenceTestImage.cpp @@ -0,0 +1,224 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. 
+// +#include "InferenceTestImage.hpp" + +#include <boost/core/ignore_unused.hpp> +#include <boost/format.hpp> +#include <boost/core/ignore_unused.hpp> +#include <boost/numeric/conversion/cast.hpp> + +#include <array> + +#define STB_IMAGE_IMPLEMENTATION +#include <stb_image.h> + +#define STB_IMAGE_RESIZE_IMPLEMENTATION +#include <stb_image_resize.h> + +#define STB_IMAGE_WRITE_IMPLEMENTATION +#include <stb_image_write.h> + +namespace +{ + +unsigned int GetImageChannelIndex(ImageChannelLayout channelLayout, ImageChannel channel) +{ + switch (channelLayout) + { + case ImageChannelLayout::Rgb: + return static_cast<unsigned int>(channel); + case ImageChannelLayout::Bgr: + return 2u - static_cast<unsigned int>(channel); + default: + throw UnknownImageChannelLayout(boost::str(boost::format("Unknown layout %1%") + % static_cast<int>(channelLayout))); + } +} + +} // namespace + +InferenceTestImage::InferenceTestImage(char const* filePath) + : m_Width(0u) + , m_Height(0u) + , m_NumChannels(0u) +{ + int width; + int height; + int channels; + + using StbImageDataPtr = std::unique_ptr<unsigned char, decltype(&stbi_image_free)>; + StbImageDataPtr stbData(stbi_load(filePath, &width, &height, &channels, 0), &stbi_image_free); + + if (stbData == nullptr) + { + throw InferenceTestImageLoadFailed(boost::str(boost::format("Could not load the image at %1%") % filePath)); + } + + if (width == 0 || height == 0) + { + throw InferenceTestImageLoadFailed(boost::str(boost::format("Could not load empty image at %1%") % filePath)); + } + + m_Width = boost::numeric_cast<unsigned int>(width); + m_Height = boost::numeric_cast<unsigned int>(height); + m_NumChannels = boost::numeric_cast<unsigned int>(channels); + + const unsigned int sizeInBytes = GetSizeInBytes(); + m_Data.resize(sizeInBytes); + memcpy(m_Data.data(), stbData.get(), sizeInBytes); +} + +std::tuple<uint8_t, uint8_t, uint8_t> InferenceTestImage::GetPixelAs3Channels(unsigned int x, unsigned int y) const +{ + if (x >= m_Width || y >= m_Height) + { + throw InferenceTestImageOutOfBoundsAccess(boost::str(boost::format("Attempted out of bounds image access. " + "Requested (%1%, %2%). Maximum valid coordinates (%3%, %4%).") % x % y % (m_Width - 1) % (m_Height - 1))); + } + + const unsigned int pixelOffset = x * GetNumChannels() + y * GetWidth() * GetNumChannels(); + const uint8_t* const pixelData = m_Data.data() + pixelOffset; + BOOST_ASSERT(pixelData <= (m_Data.data() + GetSizeInBytes())); + + std::array<uint8_t, 3> outPixelData; + outPixelData.fill(0); + + const unsigned int maxChannelsInPixel = std::min(GetNumChannels(), static_cast<unsigned int>(outPixelData.size())); + for (unsigned int c = 0; c < maxChannelsInPixel; ++c) + { + outPixelData[c] = pixelData[c]; + } + + return std::make_tuple(outPixelData[0], outPixelData[1], outPixelData[2]); +} + +void InferenceTestImage::Resize(unsigned int newWidth, unsigned int newHeight) +{ + if (newWidth == 0 || newHeight == 0) + { + throw InferenceTestImageResizeFailed(boost::str(boost::format("None of the dimensions passed to a resize " + "operation can be zero. Requested width: %1%. Requested height: %2%.") % newWidth % newHeight)); + } + + if (newWidth == m_Width && newHeight == m_Height) + { + // nothing to do + return; + } + + std::vector<uint8_t> newData; + newData.resize(newWidth * newHeight * GetNumChannels() * GetSingleElementSizeInBytes()); + + // boost::numeric_cast<>() is used for user-provided data (protecting about overflows). 
+ // static_cast<> ok for internal data (assumes that, when internal data was originally provided by a user, + // a boost::numeric_cast<>() handled the conversion). + const int nW = boost::numeric_cast<int>(newWidth); + const int nH = boost::numeric_cast<int>(newHeight); + + const int w = static_cast<int>(GetWidth()); + const int h = static_cast<int>(GetHeight()); + const int numChannels = static_cast<int>(GetNumChannels()); + + const int res = stbir_resize_uint8(m_Data.data(), w, h, 0, newData.data(), nW, nH, 0, numChannels); + if (res == 0) + { + throw InferenceTestImageResizeFailed("The resizing operation failed"); + } + + m_Data.swap(newData); + m_Width = newWidth; + m_Height = newHeight; +} + +void InferenceTestImage::Write(WriteFormat format, const char* filePath) const +{ + const int w = static_cast<int>(GetWidth()); + const int h = static_cast<int>(GetHeight()); + const int numChannels = static_cast<int>(GetNumChannels()); + int res = 0; + + switch (format) + { + case WriteFormat::Png: + { + res = stbi_write_png(filePath, w, h, numChannels, m_Data.data(), 0); + break; + } + case WriteFormat::Bmp: + { + res = stbi_write_bmp(filePath, w, h, numChannels, m_Data.data()); + break; + } + case WriteFormat::Tga: + { + res = stbi_write_tga(filePath, w, h, numChannels, m_Data.data()); + break; + } + default: + throw InferenceTestImageWriteFailed(boost::str(boost::format("Unknown format %1%") + % static_cast<int>(format))); + } + + if (res == 0) + { + throw InferenceTestImageWriteFailed(boost::str(boost::format("An error occurred when writing to file %1%") + % filePath)); + } +} + +template <typename TProcessValueCallable> +std::vector<float> GetImageDataInArmNnLayoutAsFloats(ImageChannelLayout channelLayout, + const InferenceTestImage& image, + TProcessValueCallable processValue) +{ + const unsigned int h = image.GetHeight(); + const unsigned int w = image.GetWidth(); + + std::vector<float> imageData; + imageData.resize(h * w * 3); + + for (unsigned int j = 0; j < h; ++j) + { + for (unsigned int i = 0; i < w; ++i) + { + uint8_t r, g, b; + std::tie(r, g, b) = image.GetPixelAs3Channels(i, j); + + // ArmNN order: C, H, W + const unsigned int rDstIndex = GetImageChannelIndex(channelLayout, ImageChannel::R) * h * w + j * w + i; + const unsigned int gDstIndex = GetImageChannelIndex(channelLayout, ImageChannel::G) * h * w + j * w + i; + const unsigned int bDstIndex = GetImageChannelIndex(channelLayout, ImageChannel::B) * h * w + j * w + i; + + imageData[rDstIndex] = processValue(ImageChannel::R, float(r)); + imageData[gDstIndex] = processValue(ImageChannel::G, float(g)); + imageData[bDstIndex] = processValue(ImageChannel::B, float(b)); + } + } + + return imageData; +} + +std::vector<float> GetImageDataInArmNnLayoutAsNormalizedFloats(ImageChannelLayout layout, + const InferenceTestImage& image) +{ + return GetImageDataInArmNnLayoutAsFloats(layout, image, + [](ImageChannel channel, float value) + { + boost::ignore_unused(channel); + return value / 255.f; + }); +} + +std::vector<float> GetImageDataInArmNnLayoutAsFloatsSubtractingMean(ImageChannelLayout layout, + const InferenceTestImage& image, + const std::array<float, 3>& mean) +{ + return GetImageDataInArmNnLayoutAsFloats(layout, image, + [layout, &mean](ImageChannel channel, float value) + { + const unsigned int channelIndex = GetImageChannelIndex(layout, channel); + return value - mean[channelIndex]; + }); +} diff --git a/tests/InferenceTestImage.hpp b/tests/InferenceTestImage.hpp new file mode 100644 index 0000000000..45dd8bf4e2 --- /dev/null 
+++ b/tests/InferenceTestImage.hpp @@ -0,0 +1,121 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#pragma once + +#include <armnn/Exceptions.hpp> + +#include <array> +#include <cstdint> +#include <vector> +#include <utility> + +class InferenceTestImageException : public armnn::Exception +{ +public: + using Exception::Exception; +}; + +class InferenceTestImageLoadFailed : public InferenceTestImageException +{ +public: + using InferenceTestImageException::InferenceTestImageException; +}; + +class InferenceTestImageOutOfBoundsAccess : public InferenceTestImageException +{ +public: + using InferenceTestImageException::InferenceTestImageException; +}; + +class InferenceTestImageResizeFailed : public InferenceTestImageException +{ +public: + using InferenceTestImageException::InferenceTestImageException; +}; + +class InferenceTestImageWriteFailed : public InferenceTestImageException +{ +public: + using InferenceTestImageException::InferenceTestImageException; +}; + +class UnknownImageChannelLayout : public InferenceTestImageException +{ +public: + using InferenceTestImageException::InferenceTestImageException; +}; + +class InferenceTestImage +{ +public: + enum class WriteFormat + { + Png, + Bmp, + Tga + }; + + explicit InferenceTestImage(const char* filePath); + + InferenceTestImage(InferenceTestImage&&) = delete; + InferenceTestImage(const InferenceTestImage&) = delete; + InferenceTestImage& operator=(const InferenceTestImage&) = delete; + InferenceTestImage& operator=(InferenceTestImage&&) = delete; + + unsigned int GetWidth() const { return m_Width; } + unsigned int GetHeight() const { return m_Height; } + unsigned int GetNumChannels() const { return m_NumChannels; } + unsigned int GetNumElements() const { return GetWidth() * GetHeight() * GetNumChannels(); } + unsigned int GetSizeInBytes() const { return GetNumElements() * GetSingleElementSizeInBytes(); } + + // Returns the pixel identified by the given coordinates as a 3-channel value. + // Channels beyond the third are dropped. If the image provides less than 3 channels, the non-existent + // channels of the pixel will be filled with 0. Channels are returned in RGB order (that is, the first element + // of the tuple corresponds to the Red channel, whereas the last element is the Blue channel). + std::tuple<uint8_t, uint8_t, uint8_t> GetPixelAs3Channels(unsigned int x, unsigned int y) const; + + void Resize(unsigned int newWidth, unsigned int newHeight); + void Write(WriteFormat format, const char* filePath) const; + +private: + static unsigned int GetSingleElementSizeInBytes() + { + return sizeof(decltype(std::declval<InferenceTestImage>().m_Data[0])); + } + + std::vector<uint8_t> m_Data; + unsigned int m_Width; + unsigned int m_Height; + unsigned int m_NumChannels; +}; + +// Common names used to identify a channel in a pixel +enum class ImageChannel +{ + R, + G, + B +}; + +// Channel layouts handled by the test framework +enum class ImageChannelLayout +{ + Rgb, + Bgr +}; + +// Reads the contents of an inference test image as 3-channel pixels whose channel values have been normalized (scaled) +// and now lie in the range [0,1]. Channel data is stored according to the ArmNN layout (CHW). The order in which +// channels appear in the resulting vector is defined by the provided layout. 
+std::vector<float> GetImageDataInArmNnLayoutAsNormalizedFloats(ImageChannelLayout layout,
+                                                               const InferenceTestImage& image);
+
+// Reads the contents of an inference test image as 3-channel pixels whose value is the result of subtracting the mean
+// from the values in the original image. Channel data is stored according to the ArmNN layout (CHW). The order in
+// which channels appear in the resulting vector is defined by the provided layout. The order of the channels of the
+// provided mean should also match the given layout.
+std::vector<float> GetImageDataInArmNnLayoutAsFloatsSubtractingMean(ImageChannelLayout layout,
+                                                                    const InferenceTestImage& image,
+                                                                    const std::array<float, 3>& mean);
diff --git a/tests/MnistDatabase.cpp b/tests/MnistDatabase.cpp
new file mode 100644
index 0000000000..5c10b0c2b4
--- /dev/null
+++ b/tests/MnistDatabase.cpp
@@ -0,0 +1,105 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "MnistDatabase.hpp"
+
+#include <boost/numeric/conversion/cast.hpp>
+#include <boost/log/trivial.hpp>
+#include <boost/assert.hpp>
+#include <fstream>
+#include <vector>
+
+constexpr int g_kMnistImageByteSize = 28 * 28;
+
+void EndianSwap(unsigned int &x)
+{
+    x = (x >> 24) | ((x << 8) & 0x00FF0000) | ((x >> 8) & 0x0000FF00) | (x << 24);
+}
+
+MnistDatabase::MnistDatabase(const std::string& binaryFileDirectory, bool scaleValues)
+    : m_BinaryDirectory(binaryFileDirectory)
+    , m_ScaleValues(scaleValues)
+{
+}
+
+std::unique_ptr<MnistDatabase::TTestCaseData> MnistDatabase::GetTestCaseData(unsigned int testCaseId)
+{
+    std::vector<unsigned char> I(g_kMnistImageByteSize);
+    unsigned int label = 0;
+
+    std::string imagePath = m_BinaryDirectory + std::string("t10k-images.idx3-ubyte");
+    std::string labelPath = m_BinaryDirectory + std::string("t10k-labels.idx1-ubyte");
+
+    std::ifstream imageStream(imagePath, std::ios::binary);
+    std::ifstream labelStream(labelPath, std::ios::binary);
+
+    if (!imageStream.is_open())
+    {
+        BOOST_LOG_TRIVIAL(fatal) << "Failed to load " << imagePath;
+        return nullptr;
+    }
+    if (!labelStream.is_open())
+    {
+        BOOST_LOG_TRIVIAL(fatal) << "Failed to load " << labelPath;
+        return nullptr;
+    }
+
+    unsigned int magic, num, row, col;
+
+    // Check that the files have the correct headers.
+    imageStream.read(reinterpret_cast<char*>(&magic), sizeof(magic));
+    if (magic != 0x03080000)
+    {
+        BOOST_LOG_TRIVIAL(fatal) << "Failed to read " << imagePath;
+        return nullptr;
+    }
+    labelStream.read(reinterpret_cast<char*>(&magic), sizeof(magic));
+    if (magic != 0x01080000)
+    {
+        BOOST_LOG_TRIVIAL(fatal) << "Failed to read " << labelPath;
+        return nullptr;
+    }
+
+    // Endian swap the image and label headers - all the integers in the files are stored in MSB-first (big-endian)
+    // format, so the header bytes need to be flipped when reading them on Intel processors or other little-endian
+    // machines.
+    labelStream.read(reinterpret_cast<char*>(&num), sizeof(num));
+    imageStream.read(reinterpret_cast<char*>(&num), sizeof(num));
+    EndianSwap(num);
+    imageStream.read(reinterpret_cast<char*>(&row), sizeof(row));
+    EndianSwap(row);
+    imageStream.read(reinterpret_cast<char*>(&col), sizeof(col));
+    EndianSwap(col);
+
+    // Read the image and label into memory.
+    imageStream.seekg(testCaseId * g_kMnistImageByteSize, std::ios_base::cur);
+    imageStream.read(reinterpret_cast<char*>(&I[0]), g_kMnistImageByteSize);
+    labelStream.seekg(testCaseId, std::ios_base::cur);
+    labelStream.read(reinterpret_cast<char*>(&label), 1);
+
+    if (!imageStream.good())
+    {
+        BOOST_LOG_TRIVIAL(fatal) << "Failed to read " << imagePath;
+        return nullptr;
+    }
+    if (!labelStream.good())
+    {
+        BOOST_LOG_TRIVIAL(fatal) << "Failed to read " << labelPath;
+        return nullptr;
+    }
+
+    std::vector<float> inputImageData;
+    inputImageData.resize(g_kMnistImageByteSize);
+
+    for (unsigned int i = 0; i < col * row; ++i)
+    {
+        inputImageData[i] = boost::numeric_cast<float>(I[i]);
+
+        if (m_ScaleValues)
+        {
+            inputImageData[i] /= 255.0f;
+        }
+    }
+
+    return std::make_unique<TTestCaseData>(label, std::move(inputImageData));
+}
diff --git a/tests/MnistDatabase.hpp b/tests/MnistDatabase.hpp
new file mode 100644
index 0000000000..281b708589
--- /dev/null
+++ b/tests/MnistDatabase.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "ClassifierTestCaseData.hpp"
+
+#include <string>
+#include <memory>
+
+class MnistDatabase
+{
+public:
+    using TTestCaseData = ClassifierTestCaseData<float>;
+
+    explicit MnistDatabase(const std::string& binaryFileDirectory, bool scaleValues = false);
+    std::unique_ptr<TTestCaseData> GetTestCaseData(unsigned int testCaseId);
+
+private:
+    std::string m_BinaryDirectory;
+    bool m_ScaleValues;
+};
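For context on the magic-number checks in MnistDatabase.cpp: an MNIST idx file begins with big-endian 32-bit integers (magic number, item count, and, for the image file, row and column counts), which is why the values read on a little-endian host appear byte-reversed (0x03080000 instead of 0x00000803). The following is a minimal standalone sketch, not part of this patch; the file name is the conventional MNIST test-set name and the SwapBytes helper is illustrative only.

#include <cstdint>
#include <fstream>
#include <iostream>

// Illustrative helper: byte-swap a 32-bit value read from an MSB-first (big-endian) file on a little-endian host.
static std::uint32_t SwapBytes(std::uint32_t x)
{
    return (x >> 24) | ((x << 8) & 0x00FF0000u) | ((x >> 8) & 0x0000FF00u) | (x << 24);
}

int main()
{
    // Assumed location; the test code above builds the path from m_BinaryDirectory.
    std::ifstream images("t10k-images.idx3-ubyte", std::ios::binary);
    if (!images.is_open())
    {
        return 1;
    }

    std::uint32_t magic = 0, count = 0, rows = 0, cols = 0;
    images.read(reinterpret_cast<char*>(&magic), sizeof(magic));
    images.read(reinterpret_cast<char*>(&count), sizeof(count));
    images.read(reinterpret_cast<char*>(&rows), sizeof(rows));
    images.read(reinterpret_cast<char*>(&cols), sizeof(cols));

    magic = SwapBytes(magic);
    count = SwapBytes(count);
    rows = SwapBytes(rows);
    cols = SwapBytes(cols);

    // 0x00000803 identifies an idx3 (image) file; the label file uses 0x00000801.
    if (magic != 0x00000803)
    {
        return 1;
    }

    std::cout << count << " images of " << rows << "x" << cols << " pixels\n";
    return 0;
}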
\ No newline at end of file diff --git a/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp new file mode 100644 index 0000000000..3c75ed7f24 --- /dev/null +++ b/tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp @@ -0,0 +1,196 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include <iostream> +#include <chrono> +#include <vector> +#include <array> +#include <boost/log/trivial.hpp> + +#include "armnn/ArmNN.hpp" +#include "armnn/Utils.hpp" +#include "armnn/INetwork.hpp" +#include "armnnCaffeParser/ICaffeParser.hpp" +#include "../Cifar10Database.hpp" +#include "../InferenceTest.hpp" +#include "../InferenceModel.hpp" + +using namespace std; +using namespace std::chrono; +using namespace armnn::test; + +int main(int argc, char* argv[]) +{ +#ifdef NDEBUG + armnn::LogSeverity level = armnn::LogSeverity::Info; +#else + armnn::LogSeverity level = armnn::LogSeverity::Debug; +#endif + + try + { + // Configure logging for both the ARMNN library and this test program + armnn::ConfigureLogging(true, true, level); + armnnUtils::ConfigureLogging(boost::log::core::get().get(), true, true, level); + + namespace po = boost::program_options; + + armnn::Compute computeDevice; + std::string modelDir; + std::string dataDir; + + po::options_description desc("Options"); + try + { + // Add generic options needed for all inference tests + desc.add_options() + ("help", "Display help messages") + ("model-dir,m", po::value<std::string>(&modelDir)->required(), + "Path to directory containing the Cifar10 model file") + ("compute,c", po::value<armnn::Compute>(&computeDevice)->default_value(armnn::Compute::CpuAcc), + "Which device to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc") + ("data-dir,d", po::value<std::string>(&dataDir)->required(), + "Path to directory containing the Cifar10 test data"); + } + catch (const std::exception& e) + { + // Coverity points out that default_value(...) can throw a bad_lexical_cast, + // and that desc.add_options() can throw boost::io::too_few_args. + // They really won't in any of these cases. 
+ BOOST_ASSERT_MSG(false, "Caught unexpected exception"); + std::cerr << "Fatal internal error: " << e.what() << std::endl; + return 1; + } + + po::variables_map vm; + + try + { + po::store(po::parse_command_line(argc, argv, desc), vm); + + if (vm.count("help")) + { + std::cout << desc << std::endl; + return 1; + } + + po::notify(vm); + } + catch (po::error& e) + { + std::cerr << e.what() << std::endl << std::endl; + std::cerr << desc << std::endl; + return 1; + } + + if (!ValidateDirectory(modelDir)) + { + return 1; + } + string modelPath = modelDir + "cifar10_full_iter_60000.caffemodel"; + + // Create runtime + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(computeDevice)); + + // Load networks + armnn::Status status; + struct Net + { + Net(armnn::NetworkId netId, + const std::pair<armnn::LayerBindingId, armnn::TensorInfo>& in, + const std::pair<armnn::LayerBindingId, armnn::TensorInfo>& out) + : m_Network(netId) + , m_InputBindingInfo(in) + , m_OutputBindingInfo(out) + {} + + armnn::NetworkId m_Network; + std::pair<armnn::LayerBindingId, armnn::TensorInfo> m_InputBindingInfo; + std::pair<armnn::LayerBindingId, armnn::TensorInfo> m_OutputBindingInfo; + }; + std::vector<Net> networks; + + armnnCaffeParser::ICaffeParserPtr parser(armnnCaffeParser::ICaffeParser::Create()); + + const int networksCount = 4; + for (int i = 0; i < networksCount; ++i) + { + // Create a network from a file on disk + armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath.c_str(), {}, { "prob" }); + + // optimize the network + armnn::IOptimizedNetworkPtr optimizedNet(nullptr, nullptr); + try + { + optimizedNet = armnn::Optimize(*network, runtime->GetDeviceSpec()); + } + catch (armnn::Exception& e) + { + std::stringstream message; + message << "armnn::Exception ("<<e.what()<<") caught from optimize."; + BOOST_LOG_TRIVIAL(fatal) << message.str(); + return 1; + } + + // Load the network into the runtime + armnn::NetworkId networkId; + status = runtime->LoadNetwork(networkId, std::move(optimizedNet)); + if (status == armnn::Status::Failure) + { + BOOST_LOG_TRIVIAL(fatal) << "armnn::IRuntime: Failed to load network"; + return 1; + } + + networks.emplace_back(networkId, + parser->GetNetworkInputBindingInfo("data"), + parser->GetNetworkOutputBindingInfo("prob")); + } + + // Load a test case and test inference + if (!ValidateDirectory(dataDir)) + { + return 1; + } + Cifar10Database cifar10(dataDir); + + for (unsigned int i = 0; i < 3; ++i) + { + // Load test case data (including image data) + std::unique_ptr<Cifar10Database::TTestCaseData> testCaseData = cifar10.GetTestCaseData(i); + + // Test inference + std::vector<std::array<float, 10>> outputs(networksCount); + + for (unsigned int k = 0; k < networksCount; ++k) + { + status = runtime->EnqueueWorkload(networks[k].m_Network, + MakeInputTensors(networks[k].m_InputBindingInfo, testCaseData->m_InputImage), + MakeOutputTensors(networks[k].m_OutputBindingInfo, outputs[k])); + if (status == armnn::Status::Failure) + { + BOOST_LOG_TRIVIAL(fatal) << "armnn::IRuntime: Failed to enqueue workload"; + return 1; + } + } + + // Compare outputs + for (unsigned int k = 1; k < networksCount; ++k) + { + if (!std::equal(outputs[0].begin(), outputs[0].end(), outputs[k].begin(), outputs[k].end())) + { + BOOST_LOG_TRIVIAL(error) << "Multiple networks inference failed!"; + return 1; + } + } + } + + BOOST_LOG_TRIVIAL(info) << "Multiple networks inference ran successfully!"; + return 0; + } + catch (armnn::Exception const& e) + { + BOOST_LOG_TRIVIAL(fatal) <<"Armnn Error: 
"<< e.what(); + return 1; + } +}
\ No newline at end of file diff --git a/tests/TfCifar10-Armnn/Validation.txt b/tests/TfCifar10-Armnn/Validation.txt new file mode 100644 index 0000000000..a7b59465eb --- /dev/null +++ b/tests/TfCifar10-Armnn/Validation.txt @@ -0,0 +1,1000 @@ +3 +8 +8 +8 +6 +8 +5 +6 +3 +8 +0 +9 +5 +7 +9 +8 +5 +7 +8 +6 +7 +0 +8 +9 +4 +3 +3 +0 +9 +6 +6 +5 +8 +3 +9 +3 +7 +9 +9 +5 +0 +6 +7 +3 +0 +9 +3 +8 +7 +2 +9 +8 +5 +5 +8 +8 +7 +5 +5 +3 +7 +5 +2 +3 +6 +7 +8 +0 +3 +7 +0 +3 +8 +8 +0 +2 +0 +8 +5 +8 +8 +0 +1 +7 +3 +0 +3 +3 +8 +9 +0 +2 +8 +6 +7 +3 +6 +0 +0 +7 +8 +5 +6 +3 +1 +1 +3 +6 +8 +7 +5 +0 +2 +3 +0 +3 +0 +3 +7 +5 +8 +0 +1 +2 +8 +8 +8 +3 +6 +0 +4 +1 +8 +9 +1 +0 +9 +4 +2 +8 +3 +5 +6 +5 +8 +0 +6 +5 +5 +5 +8 +9 +5 +0 +0 +5 +0 +9 +5 +4 +0 +0 +0 +6 +0 +0 +8 +8 +5 +8 +9 +0 +8 +8 +9 +9 +3 +7 +5 +0 +0 +5 +2 +8 +0 +8 +5 +3 +3 +8 +5 +8 +0 +1 +7 +3 +8 +8 +7 +8 +5 +0 +8 +0 +1 +3 +8 +5 +7 +8 +7 +0 +5 +8 +8 +0 +7 +9 +8 +2 +7 +5 +8 +5 +5 +9 +8 +0 +3 +6 +5 +1 +7 +8 +8 +0 +4 +0 +5 +3 +1 +1 +8 +3 +0 +8 +1 +8 +2 +0 +5 +5 +9 +9 +2 +8 +3 +0 +8 +9 +8 +8 +3 +3 +0 +8 +8 +4 +7 +0 +0 +3 +6 +3 +8 +0 +0 +3 +2 +5 +9 +0 +6 +1 +0 +9 +8 +8 +7 +9 +8 +2 +6 +9 +3 +0 +6 +0 +0 +6 +6 +3 +3 +8 +8 +8 +8 +3 +1 +0 +8 +6 +0 +0 +8 +0 +7 +7 +5 +5 +3 +3 +2 +0 +5 +0 +7 +7 +3 +6 +1 +9 +3 +6 +6 +9 +3 +8 +0 +7 +0 +6 +2 +5 +8 +5 +7 +6 +8 +9 +9 +1 +8 +2 +3 +7 +5 +2 +8 +0 +9 +5 +8 +8 +9 +4 +0 +5 +8 +0 +0 +7 +9 +3 +2 +7 +3 +7 +8 +6 +6 +9 +0 +8 +5 +0 +7 +3 +5 +5 +1 +2 +6 +2 +3 +6 +2 +3 +0 +8 +9 +8 +7 +8 +8 +4 +0 +8 +8 +3 +5 +8 +3 +8 +1 +9 +0 +5 +5 +7 +4 +7 +8 +0 +0 +9 +3 +7 +0 +6 +3 +3 +8 +7 +3 +7 +8 +5 +3 +8 +1 +3 +9 +8 +8 +7 +3 +0 +0 +0 +2 +9 +7 +0 +8 +3 +4 +5 +3 +8 +5 +6 +8 +7 +3 +8 +4 +3 +7 +8 +5 +7 +8 +8 +3 +7 +4 +0 +5 +4 +3 +6 +0 +8 +5 +8 +9 +9 +8 +0 +0 +0 +0 +1 +8 +8 +0 +5 +2 +0 +4 +0 +5 +2 +9 +4 +7 +9 +0 +4 +5 +6 +8 +9 +5 +5 +8 +9 +3 +8 +5 +7 +0 +7 +0 +5 +0 +0 +0 +6 +8 +8 +9 +5 +6 +3 +6 +3 +9 +8 +1 +7 +0 +7 +5 +9 +0 +6 +5 +5 +3 +3 +8 +3 +9 +8 +6 +4 +3 +2 +0 +7 +6 +0 +2 +3 +9 +5 +8 +0 +6 +7 +8 +3 +6 +8 +8 +8 +7 +5 +4 +0 +8 +4 +0 +8 +3 +5 +8 +9 +6 +9 +2 +3 +0 +0 +7 +8 +8 +3 +8 +5 +0 +2 +1 +6 +3 +4 +3 +9 +6 +9 +8 +8 +5 +8 +6 +3 +2 +1 +7 +7 +1 +2 +7 +9 +9 +4 +4 +0 +8 +3 +2 +8 +7 +0 +8 +3 +0 +3 +3 +8 +0 +7 +9 +1 +8 +0 +4 +5 +3 +9 +3 +0 +8 +0 +1 +5 +4 +1 +8 +0 +7 +6 +3 +0 +9 +0 +8 +2 +6 +3 +2 +3 +0 +0 +3 +8 +0 +3 +9 +6 +8 +0 +9 +2 +8 +2 +3 +0 +3 +2 +2 +7 +8 +3 +8 +0 +7 +5 +7 +0 +4 +8 +7 +4 +8 +3 +8 +8 +6 +0 +8 +7 +4 +3 +3 +8 +4 +8 +7 +8 +8 +9 +8 +8 +1 +3 +3 +5 +5 +0 +7 +9 +8 +0 +8 +4 +1 +3 +5 +7 +8 +7 +8 +7 +4 +6 +2 +5 +8 +0 +8 +1 +2 +0 +6 +8 +2 +1 +3 +5 +6 +0 +1 +2 +0 +8 +3 +0 +5 +0 +6 +8 +0 +2 +7 +6 +0 +6 +9 +1 +7 +8 +7 +0 +3 +9 +7 +8 +0 +0 +3 +3 +7 +5 +4 +8 +8 +8 +7 +1 +2 +7 +4 +4 +8 +4 +7 +7 +3 +2 +7 +2 +0 +8 +8 +5 +8 +0 +8 +2 +0 +8 +7 +5 +0 +8 +5 +0 +0 +8 +2 +2 +2 +8 +9 +2 +7 +2 +7 +0 +7 +2 +1 +0 +0 +0 +8 +4 +7 +9 +8 +0 +0 +7 +7 +0 +7 +8 +4 +4 +3 +5 +0 +1 +3 +7 +0 +1 +8 +1 +4 +2 +3 +8 +4 +5 +0 +7 +8 +8 +3 +0 +8 +8 +8 +8 +8 +4 +3 +6 +7 +3 +1 +8 +3 +7 +7 +5 +5 +6 +6 +5 +8 +8 +1 +6 +8 +8 +3 +3 +3 +2 +0 +1 +8 +8 +8 +0 +0 +9 +9 +3 +3 +5 +8 +3 +0 +0 +4 +2 +3 +3 +7 +3 +0 +5 +8 +8 +9 +8 +5 +4 +8 +3 +0 +8 +7 +8 +3 +9 +2 +8 +4 +7 +8 +3 +7 +8 +8 +8 +8 +3 +6 +3 +3 +8 +1 +9 +9 +4 +6 +8 +0 +0 +0 +8 +8 +9 +2 +8 +8 +8 +7 +8 +3 +1 +7 +0 +1 +5 +8 +3 +3 +3 +8 +9 +3 +8 diff --git a/tests/TfCifar10-Armnn/cifar10_tf.prototxt b/tests/TfCifar10-Armnn/cifar10_tf.prototxt new file mode 100644 index 0000000000..9abf454e1a --- /dev/null +++ b/tests/TfCifar10-Armnn/cifar10_tf.prototxt @@ -0,0 +1,915 @@ +node { + name: "data" + op: "Placeholder" + attr { + key: "dtype" + value { + 
type: DT_FLOAT + } + } + attr { + key: "shape" + value { + shape { + dim { + size: 1 + } + dim { + size: 32 + } + dim { + size: 32 + } + dim { + size: 3 + } + } + } + } +} +node { + name: "conv1/weights" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 5 + } + dim { + size: 5 + } + dim { + size: 3 + } + dim { + size: 32 + } + } + tensor_content: "2\033\304\274\307\254<\275\016\354\253=\2520\002=\330\017\005\276\033\203\021\275\300AF=%\310\004\276ft\206\275\250\374;\275\253\301\214\2754\274\275\274TS\370\27487p=`\200\337:h\014\351\275\031\255D\275\375\2461\275\010\210\246\274_\330\266\274\373\373\222=\265\025\242\274\333(d\275\002\255\222\275k\210\221\274=\203\234\275=\301\264=t5\206=\003\023\033> \303\312<d\335/\275#\274w\275\005\301H;\004\320\370\274\372\207\325<\263\211\253=B\201\n\275*\305$=\351\311.>~\261#=\307\2303\275;\221\244\274\206t\221;\024[\n\275\2724\301\274\260\002\"=\371\035\022\275M\001e<\263\302\200\275:\224`\275\304K\247\274\247\352u<\177\266\277=\255~\220<\237W\001\276\317\3314\275\027E\002<W\271\335\275\240Z\230=\371\013\265=\346\264\207\275K\342\305=f\221\247=.\233\037\274\317%b\275\337\234M<5l\364;\2249\216=\250\260\311<\022A\335;\000\324C=\327\'\244=\203\3634\275\237+\373\274\261\261\032<\325P\210\274q\024\362\274a\355<=\324\200\234; h\336=\241\233T<\367\301\205\275\177:\264<\257\211i=:\213\237\275k\000\232=\245\031\233\275B27\275\004A{\271\260\177\034\276\230f\322=\311g\222=\3379C\275\327\030\317<~q\344\273\017\317]\275b\231\006\276\274\373\n\274\223\330\032>M\0214\275\202\375\376\275\237\221\254\275n!\023\276/\270\022\276\213\214\362\274I\247\330<\300\337\237\275\206\304\205;\217l\300=\357\021\234\273\004-\335=\224_\316\275\304\254\313\275m|t\275\037\220\221\275\3250\311\275\251\356\007>\272\253\206<CS><T}K=\373\3166<=\360b\274\010\373\335=\010U=\274\245\254>>\337\326\020=m\327\260\275\331O\221\275\355P\362\275g2\267\273\202\201F=\277)v\275%\320\020\275\303\210\246<\264k\251\275]\026W=\275\320\215;\340\016G=\200j\004;~;x\273\346\221e=\234\350<\274\220\302\225\275 
\202\371<\335\316\256\275\337sl\275Y\366\264\275\243\025\'\275\025-\316=\202\003\262=\271\371\342\274\203\264R=]\255\020\274\344\215o\274\307m\324=\034\033\';5\246\333\275\267\264\225=\345\346\306=1{r\274\257L\014\276F\236:=\023)\317\2739\373<\275\244Zw<l\270\330;\343\325d\276\007Z\326=S;\237<\253\235a=\022\341\373\274\365\340\270\274\273Y\351=\330\005\345<[\243\303\275\224\266\007>\356\267\226\2751\'\204\275i\315C\275\352i\270<\234I\007\275\320\307\030>\210<\213<\002\253\274=\232%\212<\376\266\326\274\201$\372=g\2334<+\311\215\275\307\236\241;F\223\221\275<\202\036\275\230\253\351\275\3530\003:\223\0034>Z\210\267\2756\016\000\276R\177\315\275\027!c=\036A\026\276\274\253\213<\324d\223=g\025\355\275\267\016\240<\372\003\211\275\326}G\275\236\0362>\013U\241\275\010\245\306\275x\0161\275\024\352A>f\277\257\275q\223\261=\t\377\005\274\214\214\252=2J\341=%\315\254\275\220\t\305\274I\243\226=w*\226\275\340\215=>\216\315\017<#9?\275\330\036\212\275|\303\271\275e\345m\273$\244a=\3162\272\275\234\360&\275\233p\245<W\337\020=W\213\005=\230W\\=\350\333\325=\202\311\014\275\027\006{<2%%\276Ag$\275j\337\232\275\021J\365;t\305\200\2750\267\364\274\375y\016>\340\347\007\2758o7=,\243r=\031x\251=^\031\017=\376\365\276\275\350\nA\274\301d\255=\201\243d\275V\232\315\275\\\353\364<\210\265\024>\001\275\\\272n\255\355\275\227\224a=\003\'\026\275]\317\303\275\201\260\265<?\223\267<\tI\243\275\003\354\241=g\332\225=\300_\306=\317\202\257\275F\333J\274\232\003\324\274\320Q\356<6p\006\276\245l\304=\221\363\206\275\370[|\275\376\0342>\310\200\367<\023\354\014\275\242\262\341=\031\310\371=3X\326=ns\007\275\227i\300<n\247\327=*p\027\275\316\324U\275\235\324g\275\276\357\227\275\252\335\346\274\321\317\335\275O\214#<)\264.>\327\314-<\273\306r\275 T\225\275o\340\231=\377\233\300\275w\345\250<*>\332=5\355\r\276\304\265\004\274q_\216=\217i \275[d\027>6P\223:u\237\204\274\272O\325<W\035U<\253\372\277\275cHo=[\2511\275\324\365\036<=KC<\336\204\273=\247K\271=q\270\214=\211\202\215\275\257\177\316=\336\360d<\231\263\213\275\303\'\202\275N0\213\275D\375\032<c\267+=Q\266\020=%\t\022\272\243\221(=\325\260\302<\251\300==u(:=\245\036\016>\273\234\265\2753\303G<\032?\223<\234\363\315\274\271@d\275\006_J\274\351~N<\275\337U=Z\273L\274\314$b\275\344u\370<\264dK\274\215\216\270\273.f\240\275\177\267\237=\310\250\350=>\303\202=\315#\177\275\2241\220\275\003\003 
<H\260\323=\204\354\221\273zi\313\275u\361\213=\207=\251\275U7\361;*#\206=\035tX=\263D\215\274{\342\220=\337\036\002=B\270\302=\372\234\000\276\263\0001<\351\301y=\373\254\177=\265\243\376\275\235\277\217<}\025?=\232s*\274q\317\371\272\345\220Z;0\237\212\274\013\261\010<t\320\253<\035\260\305\274\320\200\377=Q%\005>\250n\244=\032\302^\275\352\235n\274dp\252\275\352\034\252\275\307\177\203\274`l\263\275w\021&\275A\242\341=\305\225\362;\277vV\275\327\311\267\274m\221x<N\361\213\275q\221\356\273\017\217s=\030\017\023\275#&K\275I\373!\274\243\'C\275\027\020z=\352Y\217=.5\036:\375\247D=\246\370\337\275\211\311o\275G\r\030\274R\211\304\273\'\006%<#b\306\274\022\250\260\271\235C\250<\3676\312=\nq\245\274\023b\211<\305\211\243\274\366I\366\274\207\212\372\274h\376\031\275\376`5\275\240\301U<<fe=\212\346\030\274\243\344j=\206\000\235\274P\3144=E\237\036=\355,\361=\270\342#\275n\203\007=a:,<\027\367<\275\350\275\221;\227\352\257\274n\031\226<p\372\252=d\353\221\275\235]B\275\337\317\317\274\323?\233\2747\036\265\274\206\203+\275\320\204+\275\267(\231=s\322\337=|T\263\274^=\252\274\241\2429\275\312J\213=S1\366<\326WU\275%\310<<\035[\300\275\220Zh=S\375d=\344\206\034=\375u\235<ghX=\263\032\323\273\3456?=\311\270\213\275!\016<=\312\2314\275\013\322\370<Y4\216\275\251\246Y\275\303\231\225=:\266X;.;\274\275>1\220:g\322\'\274\271\354E\275ln\361\274\320\376\326\274c\301$\275|\004c=&F\r>\332\'\024\275U\005\230<\244\216\007\276\350j\035\274_\014\215<\335\242\234=\335\276\035;\222\262}<\251 \346=\267\267\265\275-}\213\275\227s\031\276\263\307(\276\001\024W\275,\340\217\275<\351\334\275,\037\206\273\272\377\324\275\235\366\007=\341~\200\275\247;\001\276\275\273\020>\023\367\316\275t\350><\334b\230\273\032\217$>\244\247\221\275\t\231$\275cu\034=\3276\025\274+N\033\276\376\366\315=\226w8>\000+\025>\244Ah=Z\201F\275\362F\021\275\212\244#>\265\201 \274\314{=\274\253\252\021>t\033\032\2752wL=\024\004\230\275\240\314\323;\351\223M\274\242\226\201\275\014\354>\275\337\223\325:\037+\220\275\351\224\350<|X+\275\034\177\367<\321}\001>A\234\251\275\222\253]=b;\371<\240\320\001>\313\313\010\275M\035\242\275\365\303:=\002\3364=6\003=\276\244\211\234=\374\3525>\202\264\265\275\354\342\325=\325\340\003>\032A\030=\333\016\252=\355\2141=`\371\235<S\026\323=\354\373\326;\003\017\200=\354+X\276\007\273\265=\232\332\351\273`r\244\275\360\036\324\275\332\3769<\264/\323\275\005\235\204=\260\024\200=e4\375=}\343;>O\233\304\275\033\210p=`\246C=r\000\217\275\334\032\003=\212O\032\275\016b\356=\035\253\271<\007\242i\276/v\252=\246\314\">:\010g\275\265\376\022=\204\204y\2754^\"\275g4J=/\020\014=\211\215(=\226V\037\273\001\212\344\2756u\337\275\3170\r=\2266Q\276SVF=\316\223\377\275\330\236\361\275\334\314\002\276\007\246t>(\355\371\274\361\253X=\352\256\331\275J~y>\357\274\217\275\254\301i\276\245\232\262\275W\367\370=L\336\021\275\177\271\007\273\217\324T>d]\026;\r\317\325\275\272\303\027>\225j\205=\375W9>\212\212R=#~\333\275Z}\225\275\371\345\024>\347D\242<\032,\325\274`\357\311\274\352\247s\275\363\032\016=^P%<\223L\025;@@\274=S\371\352\275&\346U\275\322\344\336\275\305\326l>4`\310\274\re\276\275\001\336#=\376\356~>\261Pc\275\016\303A\276S9\001\275G\255\215=\025\375R=\211\327\227\273\nR,>Drm\271\343\320\342\275z\033\366=\243<s=1\010\001\276\231\216\213=\352\245->\302\365\201<u.\253=\251\233\243=\257\001\246\273P\347r\273\327\300\224\274\364\204\220=\031\360\001\276\221\334\342=\325\035\355=E\r\211\275L\304\007\276\300\224\377\275\375\363p>\255>P=\351\233\r\275\330W\003>\210\304|>\017\"\204\275bl 
\276CW\216<\207\031f\275\033\331\353=\234O\331<$\307\201>62F<gK\372\2759\025\354=\321\320\242=Y\031\224\275\014\367\035;vt\363\275G\225E\275X\315\217=gj\017=s\326\276=\372\256E\276\265\300\351\275\257\257\005\276\354I\240>\007dG\276\246\217s=\220M\203\275\326h\003\276$v\211\275\267\314\216\276\326\271\210\275\376\000\022>\264\013N\275\253\302{>\240\222\275<\232w\260\275\231\235\216\275\\{U<\331\t\006\275Y32=\000r\035\275\373\014R\276\0276^\273,\371\'>,\231\210\275\330\343.>b\000\222<Rs\234\275K\352\254\275!\320G>\n\277+<\3704s\273\214\325X\276\023\355\223\275Bu\222<i\214w>\207\231\t\274\332b\305=\033\030N\275\311\273\224\275\021\354\206\275<\372\253\276?\261E\275\222\030\r\276\276\257\371;\340[\210>\240VD=\000\027\027\276\261S\276\274p\266n\274\343\303\203=G\004\244=\031s(\276Z\037B\276BD\314:\204M\017>\332\345\212\275h\271\362\275f\217\371<\344Yb>L1\201<\022;\340=b\n\230=.\204\234\274\263*C\276\355$\001\274\017\376\203=\303\321?>/ \322=\374f\346=z>\004<VM \276\235\234\310\275\312^b\276\353\204[=\351\276\362\275\253\031\215=)I\211>m\347 <pE\300\275\376B*=\037\3238\275jP\350=\245\206\316=\241a\246\275\352C\030\276\271<;=\337\265\005>\327\257\027\275o\"^\275\"\203f\2759_\003\276\3000%\275\013-C=}\000\r=Q\215\002>\335No=\305O\265\2758\352\341\275c}[\275W\331\t\276\355\200\027\275C\277\251;aA\244\274\346\325P=\036}I>\246\"\207\275\003\2146>\220fO=\010\273\032>\366f\255=)+\306>\243\000\257\275\340r\036\275}g-\275\234\322\342\274\235\360\350\275G\304w>s\t\021>\360k\025>eA\353\275|\373\260=\205\357\237<\250?\277\275_m\314\275Y\3319>\331\030\007<\200X\206<\272~V=\356\374\177\275\265\375\323<f\267\275\275$V\260:%\236C\274u\213\217<6\331\037\274\001\002M=!\374/>\204\324?\275a\310\303\275\354\310\034\275\026\003%>-^\365=po\247>]e=\275\233\205\331\274\001e\305<\"TU;\320cV\276\340\363o>z0!>\316\354\337=\347t\t\276\352\272\241\275\270\342\232<\361\2425>+\351F\273X\273\273=\361\341{=\306\214R\275\236\337\017=\362\2057<yG\226=\331\2560\275J\210\314=\312\365\340\274\240jK=g\244\205\275?\222\r==\233E>\305\366y=@\020\017\2762\365+\275W{B>\361\"c=W\353\250>,\344\260<\204t\022\274\235\035|=\230\240\220<wj+\276q\237\203>\364]M>\301\027\271=\355 \373\275\025r\036\274\266K\230\275D\204\024\276\023i\"\275\265\327`<n\225\024\275\302\262\233=\346\022\311=nO\231\275\305;R\275\267{=\275\002k\310\275\257Bh\275<\255\311=\266\321\310=A\331\246<\247\275\206\275G\364r\275\321\330\341=\335w\005>\235\366\276:\333\343\036=\025+=\275\220\020\200\275\253N\273\275h\356\331\274\314\217)\275[:\205\274\205\021\213\275P0\266=\257\037\314=\223\371y\275\237P%<\234\307\255\274\367\321a\275\265\372\255\275\224\375\t>o\027o\275\017O\270\273v\243\265=\370\234\210\275i\304,=\335\0025\275\336\351r\274\363\376\307\274\351\225\013>q\375\215=e\232~=3g\030\275\246[L\275 \332\005\275\344\367\r\275\317\t\231\274>\235\234=\022\355b\275\241q,\275\262\010]\275\366\3009\274\222\272\233\274\272 
\225\274S\341\344\275\346\366\016>\303\242\260=\003\346\247\275\371k\243\274\365\3006\275\005\322\367=\322\030`<@\342\220=N\206\023\274\014G\240\275k\265\234=c\217\3479EeM=\374\001?=\354\276-=\210f\245\275\007P\362=X2\006=\036\003\240=\025>\254\275n\2649={n\345\275po\343\275\303K\350;\3001m\272\261\212\302\275\227\273\227<\314\237\247<Q7K\274ZV(\275\200\313\360\2743D\371\275-\311\033>kp\265=\014\017\261\275\275\246\306<\3409\005\276\303\234w\275\177V\215\274b\346\2708\022-\235<\254\307/\275^\200\337=\025E\222=S\035\333\275\354\3143\274\204\301\262\275O8p<\375\023\252<\373&\275\275\214\213.>\275\355\375\275\033\237\374<\340\330\003\27685\n\276y\016\372\275{\025\205\275\247\010\313=\361y\026=\006\214/>\003\217\325\275\375\014m\275r\256(>9\227N<\330_\034\276\365\216W\275\235#!>\206\244\301=^\024|<\221\220G\275386\274\221\030\252=(\032\224;h\377$\275\340\362\020>\303;\027=\232\235\001=C\363\026:y\353\361\274cc\225=7\347#<,\363\032\275\274\307\025>te\215\275H\263#=\002\226\351;\253\244A=X\301\034\276\233\224\020\275G+\010>\013(A=\325n\372=4x\256\275E\225\245\275\031o\367=D\260\252=2\357/\276b\2525\275\014\031#>\0229\224\275\004I\224=`A\r>\241\321\032=\2416b=\253[R=\304\024\200=i\260\264=\'\342\221=\027\006\213=\010!\245\275\017\344\323;\254\265\221=z\010\031\275\224z\351\275\253\224\017>;\211\006\276~\021\273=\026K\026>^H\032>[5\361\275\235\332\202\275n\375\260=-\264\372<\204?<\275\3024\225\274\325\321)\275\241\247g>\237\017I=B\026i\276qB\353\274K\311\021>\236\222\253\274$\336\203<U\266_\275H\337\t\275l\034\213<\002\033 =V))\275LK(<X8\377=\226\372\346\275\304\205\245> u\357\275P\347\221=\033R\332\275\211E]\275P\240\253<\267\260\225>\267\315\004\275\351\363e\275\263W\352\275\362\3776\276\004S\365<l\261\217=\327y\312\274\016\021L=p\030\233\275/\363\260\272\375\335l\275\014\227\t\2730\010\006\276b\236>\275\301.\250=\000\r\365=\300:\231<\027)\366\275\300\034X\275\017\261\375=\334\270\276<r\275\211\2750\214\257< \367\247=]\241(=c\360\210>H(u\275z\272\000>\025/\346\2750\363w\274_8\255<3\242\244>\374\320l\274\322k\333\274\013\312\t=\362\267T\276P\341a={d\014>P>Z;\353\325\016<_fO\273N\256\371;S\207\r\276\220\275\356;\244\236\364\275\370\374F\275?1\244=\350\345\333\275\344\031M=\026\221<>D\r\025<\324\025\266={\373\212=-\212H=\250\021\037=\022c\357=m\202\323=\276\204u>\271\371\301\273\373\343\010>d{\252\275\020\333\276\275\267\332\375\272\004\251\221>>\320\224=\242\r\301=Q\002\375=\266J]\276H\023\274<2\206\372=\330k\314<\323\2229\275\211\237\202=\362M(=\nF*\2755\270\315\273\032|\025\276&XY\275\007\200\375=ZG5\275yh_:\254\331\002\276\324\367h\275!\006\255<\317\205\016=c{\033\275o>\227\276\226W\025>\3633\340\275-\002\255\275\236\352\330\275e\250\224\275\203\255 
\276\326m\006;\326FP\2766\224\274\276\3367i\275\343R\237<xv\247\274\nny\276+\356u=\334\221\313\276I*\326\274\205\'\226\275\000cW\275DQ\225=^o\206\276kIz\276\253\032\340\272<\377\023\272#\230\220\274\"\350\347=y\036\365;\361\236\257\275\301\221\207\275\275N\">\375\201@<~\005\271\275\312\237\224\276\255<\267=\261?\037=:r\024\276\264C\201\275\026\024u\274\023z$\276\013\315\254<\325/]\276\002\240\307\276\225\263\n\275\330\306\224\275-\222F\274\332|\214\276\346\263\326=5\313\312\276\370U\371;OB\201\275UB*=\320y\367=-N\276\276\2670l\276.\245n<\271\037\313\274~\233\230\274\025\302\303\275E7\355<,\211\203>e\260!<\322\221\266=\326ca=Y\337\233<F\211~\276\251\030\022>\256f\333=\363\003\257\275\270\224\271:X\334\267\274(M\251\275\021\365N\275\216\275q\276\253\265\240\276W\272\216=$v\217<](\004=C\322\216\276\343\022q=\010~\257\276z\212Q=\212\354P\272\"\235\270=\320\225\022>\271\331\234\276\312\r`\276\\\2628=\310\230 \275\261\034\013=\223\215\311\274\240\277<\275\355\016\372\275\247\273@\275r\272\210<\025\224k=\2562\031=H\261\334<\032\374\371=\013\260\001\276\261\343Y\276\366r\254\275\217\017\350\275U\346\"\276\301F\351=\036\362)\276\2611\203>R\025g\275\212\354\261=^\035\267=\223\225<\276\006\021N=&\231\202\274\364\275U\275\312\004\006\276~9P\275<\271\303\273\247\267\363<C\313\257>\032\245\006>\312\322\n=Y\361\001\276\022\202\265<\260\342J<\315\274\344\275\377\237\275\275n\315\023>\212\241\273<R\211\344\274 \245\312<\337\372\200=I]\314;\277\374U\276yV_\275xz\244\275\022c \276\231\340\301=e\232B\276}\366\205>\t\027\031\275b\230c\275~\266a\275\377\346f\276b\036\311=\271\3174\275\266\307\224\274\323#\231\275\027)\321<nt2=9\272\003\275Cr\252>\227v\'>\020\016\214\274N\331\023\276\273\246\227\275\210\030D<\245$U>+7\3529\0073\217=\021\207i=H\313i<q\321$=O)\002>\252\033\215=v\331\345\275\256PA<\314\005\367\275\373\370\206\275\221\277a=0 `\276@\335{>l|\200=4ny\275B\223\261\275\024db\276Z\217\'=\017\242P\274\231r\026=x\325\277<\212\357\210=\352\334]=\337\343\333\274\201\257\243>\360MM>/\253N\275l\013\370\275\314)\204<\274\352\212\275\255\251\n\276\234M\000\275\342\216\351;\312\315\026\274a<\021=k\205\317=\265\235y=\351\017\201\275fJ\316<\336\016\033\275As\221\272_\226\200\275\037\000\377=\340u\317\274\237\246\301\275i\002\200\275`H\257=\3327\017>\320w\255\275\336`2\274\017\246\">~\027;\275\324\363\350\275(\216\340\274\202\226\320\274/\314\315=\235\261\242\275\355\321\220=8 3=\305.\324\275o\000A\275\334h\307\274\006n\206\275\344I\352\275X\361\330=\315\322\'\275/\027V\274\203\246\257=\270z2\273\rv\320<Z\201)=\342l!\275M-B<FI\330\274\347\230\273=\035\027\271\274\357\262D\275\264\356a\275\272f\003\273\242\201)\275\224\223\024\276p\311?=nP->I\t\326\274\316;*\275\250\236\001<\214\337\335;T\241\367=fw\374\275:\002\001>\210\027\270<\274\220\005\276\262\023\346\274h\212_\275;\233 
>S\016\232\274\357Pw=\033V\202\274\256\031\026\274\347-\254=\330du=\355\214$=Q\256\023>\000t\004<\344d\236\275\024Y\302<c\333\233=\r\331\t\2747\212\343\275\245<\331<V\337\232\2754\333\016\276%\024\033\276;\017b\274D\337\022>\340t\n=\251\037\213=\240\033\227<\362\210@\2743\326\255=\221\2551\276r\257\007>\316I\037;:\225\364\275\001\317T=\235\003\377\275\324\241n\275\211h\010\275~L\232\275\261\250\320<~\354\325\275\006Q\345=X\327D=\372M\374\275?s\265=\267\205,>6\305\301;N\035\333=?\363\200<\335\330\230=\350\364?\275\014\213\325\274\013]\007\276\363d\375\275\026\275\202<\033k\225\273\341}\266\275v\246\200=\002\332\324=\314v\254\275j\222\257\275\353\336\323\274\201g\\\273\023c\300\275\0236\027\276\205\210\305=\320J\370<\205\370;<T\316!\275\242\331k=\206o\214\275h\342\377<{\350K\275o\224\010>\216dd\274\327$!<\243r\326<\256\006%=\007S\251=\267t\310=\025\234\025=u\317\214<\337\003\344\274G\300K\274\026\311\224=\363\323\000=\326\007\266\273`\210p<\202\246\305\275\254,D=#\013\243=s,\277\275\254\201\n\276-;\224\275\270\243\227=\264\032\347\275\233\016\350\275\352\343\267=\257\347\320\274I\210\274=K^\267=\322n\204=\004\001\006\275\236a\215=N\360\214=\344\250\233=\222\351\002\2742\003K=\317\245\241\273\t\031\r\275\230\014\223=\235v\361;&\276\251\273E\377-=\220o\253\275u\0303=y\2308>\034\004\034>\364\312\251\272\224\347\222\274\210\241\"\276\374\025\014\274)\212=\275\307R\000\275\371\230\260\275\010\224%\274r\n\020=N\315\036\276\304\315\312\275)0\216=\232\210t<\316\010\351<\352A\341\274\t\004\306<\032^\366\275>!\214<\223\202\273\275fr1=xM4=0N\007\276\375v\260:\236\362M>n\324k\275V\r\263=bx\264=\331E\213>j\024H>\034\032r\275$\036\000\276\270^\252\275q\340F=\251\370\201=H\025\214>^\233e<\326O\244\275\263\317\227\275\271\257\024\275+\372t\276\357\246\252\273\270Gr\275\276\027\035\276`\311\222=\374\003B=E\233\307;E8\337\275\225(\374</\367\361\275\\\361\265<\247\271^\275/\354\366<\260I\342\274q\317\357;C\022\177\275EM\001=X\204\177<\344M\222=p\312\240=\244\375j>\256yK>i\326!\275\371Y\207=\316`\204<\346;G=7W\273=\006$\233>S6z<\3071t\275\032\3176\275\306x\204\275@j\217\276\3713\242;\376\250C\275\037\242\n\276\376\321q=\374\205_\275\305we=\302\370\016>\237C\027=\333\255\255\2759\222]=\006g\273=,6\335<\031\317\206\274yG\234=\350\232\240\274\272\3272\275I\254V\272\204\305r<doU=h\320i>J\2566>4\237;=5\320?>i\216\341=9\222\022\273\203J\206=\257#r>r\255\031\273\245Wl\275\312K\302<n\344\277\274\201\036_\276\211\345\312\274*r\230\275W\225\034\276\256U\303=\2166\207\274\177?3;G\326\261\275Z\221\315\272\024#\010\276~\t\262<\264\345\342\275]SS\276\302\033\362<[.\335\275\345\005c\276V\312\\>\205<\022\276\235SY;G\300\251=\035\253]>\347\227m\276\026\202\\\275\276\270\276\275f\007\310<q\307\313=k\224\273<\350+f=[8!\274h\204S\276g6\023\275\020\352C=\355f\253=\234\2746\276\212\360\240;\357#!\276\333X\204;~(*=\340\245\374;\203\021\273\275Rm\253<\264E\347\275\331\010\241<\232T\261\275^D[\276\361aS\275\274\242\326<k\251}\276)\205\031=\354\365\223\275GEn\274\355\206L=\276^9>| 
\213\276\354\204\026\275\210\"\021=\312\267\023\275\316\201\303=\332\363\226=Q\276\247=&\315\357;\336\347\017\276#\257\216<\342\273o=\316\211L=\341\020/\276\224\211\362<\243@)\276$\t\013\273\226\254$\275\344K#=\343\214J>;\213\370<\260\223\352\275\301\037-=Z$\206=b\tM\276\211?\002\275\366~\314=\215\326\034\276B=\017\275\223\350\341\2756\322u\274\2740>=[\310+>\036IO\276\261\306|=Ii)>w\364<\274E\035\375<U\251;=M\016\232=\365,\327<)?\336\274<\032\212=\377E\274=\307\332\241=\000\333.\276\324\352_=l\035C\276\220NE=\341\3637<c\032\020\275N\322\320\275k\025\001\274\360\374\261\275.\305f=\353\354H\275\202\210\321\274\216\010)=\267\311\331\275\240]\007>\251\3101>\244\330\300\274=\251\250\2750\232\315=\344l@=\235\274?>\254o\026\275\343\331)\275UP\377=\244x\340=\035\355\255\274\330\250\205\276\253\236t\275G\t\035\276lB\267\274]5\354\274\251\262t>\023\222u>%il=\tI\372\275\364\322\212\275\266\271\036\275\3352v<9\346\351\275)\373U\273;\271\221\275\315W\033=\204\272\027\275n\200\242\274\305\270#\275\301\341{<\251|\372=\235?\335<\343\007\211;\250\242\250\275\254\351i=\225\274\032\274?G5>\351\277\360\2749\374G<\005\324\221\275\036\016\324=5\000\371<\tN\207\276^\204\374\274\264x\217\275N\221\351<s\247\371\273\010\001\201>Z\327\201>\225,\305=\027\204\034\276\304\345\251\275\212\004\321\274Y\346\304<\350\342\'>V0\351<\305\224\265\275\300\351I=\362\376{=\263X\036;\\c@\274\303f\241=F\036\\>a\242\026\275\234\350\230\275\325\237\005\275\031\005\243=\035\372\034\275\001r1>\020{\203=n\212\177=\203\255\363\275\260w5=x\371%\273\271\354x\276A!\006<\247\002\006=\344\034\207=Q6\374;/\005r>1\300p>\010G\000>\357T;\276\271\232b\275Q\364M=\215\274W\275\360\370\347\275{5\315;\353{\240\275@*\211<\305\357\314\274=\241\333=\004\211b=9\2026\275-\220\024\273\334\220\000>\303md=}E\350\275\375\317\227=\3136\301\275J2\223\275\335\304\261\274h\017e\272\026\202\027>\215\215h=\277\326\212\275\013a\347;-\363C\275\002\327\004\275\213c\221\274\235\"\207\275w`\267\273w\314\205\275\177\210:=\325\254\233\275G\203\236\275;\035\267\275N\177\342\274\2508\203\275Ge\200\275\020\254\237\275\225c\224\273m\020k\274\260\250\371=\324\021\364\274?v\t=\251\032m\274\003\330\305;\316\365^=\246?\217\275\346\263O=ya\010\276\355T\355\274\237\342\344\274(\215\024=A\004!\275\300\017\230=+\t\265\273\364yZ=\000V\345\274\rG\252<JU@<\010\023t\275@\376[=\230\225\256\275\371\025\274=\223G\252\275\335Q\333\275\265\003\306;\343Q\024\275\003\007\005>\002\251J\274\010\360\216\275IY\211\273\277\260\035=\361\356\371=?\306%:\31150=\366\355\273=\262\340\006\275\026\312Z\275\024\'@\274\274\350\246=\036\032\362\275\310u\237\275A\311I=%\214|\274[\031\023\276\325\2411=\315\320\033\275\263Ki=\201\331\245<\322n\317=\021\213\275<\377R\207\275\003H\240<\rW\r\276\\\324\257=\342N\315\275\234k\257\275W7\233=\351\347\325\275\300u8\275w\261\225\274>\233\323<\322\354\002=<s\371\275\'\022G<^Q\225<\325\251\222\275`\023P=[\245:>\344j{\275u.\200=\342f\301<\244v\017\276\275\336/\274Z\214\021\275j\266\267\275\262\022;\275\376\342\211\273\224\201\n<KhM\275\304\345\361=B\332\323<y\260p\274\212\203\027;\333\027\253\275\022\226\277\2748\271V\2750\370\247\275\'\2579=!\304\003\272\020C5\275*I\354<@y\206=\242m\241\274M\332\242<\371\005:\275\206\244e=v\350\250\274\360o\260<\271J\222\274\356s\230\274]*\267<C\247s=P\014\265<\r\025\037\276\3364#\271\030|\235\274\265\000\016=\303\2363<\234c\n=U\305\010=\t\222\222<\262\033\232=\345\350#=p@;\275cH\244\275\352C\006\276,\331\360<\351\312a\275\327;.\275\254<\n=\351@\230;\363\361\256=\330\273\236<\025p\365<,\005\234<wI\240;Kok=Vr\263<\276\303\025\275$\244{=J\277\
003=T\311\007\276O\275,\274\253%\376\274\312\005A=\230\017\322\275\244\255\345\274\277b\325<m<\215=\000\221\251={\000|<\243\350\223<\302\303\027\275\357\224\r<\247\026\025\275\202\030\334\273\037\277.\275\307\302\000\276v\032\273<\2213\274\275N\373\344\274\347\354\255<\271\014\315:e\003\025=\206\313\247;v\222\373<v\r\311<\022\200\253<\316\003\375\275\033V\r=\371\200\241<\010\222\305\275K\225?\275r\376C>\301\352\325\275\204\327\266=%\217^=\215\371Q\2753\007\232=\262\017~\275\375.\356\275=\3330\274t\014,<;\317\007=>\220\253\275sh\237=}\303\376\275w\337\350\274v\360\034=u\270T=\366!\273\273\'^w<x\362)\275\362i\"=\\\316\344;$#4\274\213\227;\275W\343\227=\323\200\017\275\264*a<\350A@\275\202[Q<\274\002\306\273\313^\3338J4\273\275L\2319\275\233\2761\275T\216z=b\312\372<3D\230\275l\310\222=\321\361P\275@\"\007=L\334a\274\003{\220=\372\227\216=Ba;\275\360\327H=\354\263\210\275\031A\000\275_\"\013\275S\366\303<\224n\203\274\252]\264<\217\263\200\274Q+\304<U X\274\031\273\252=>X>=\22260=E\226J;`O\241\274~\322\260=\356G\201:\276\265\236\274K\302\213=w\362\231\273\264\254\034\276\325x\303\275F\326\276\274q\351\233=\243\025\305\274C\247\207=\316\003\271<\021\370\320=r\275\253<\212\372\264<l\365d=\346\370\n\276K\214l<\340U\243\274\033\314\255<[\225\242\272fC\n=\221;\342\274\330\014P\274\334\305\013\275>\214|=|_\256\274\235\330\030=-\273\t\275=\2443=\315h\025=d\205J<\233\202\031\276,\0148\275]r\000=\315\327\242\275a\322\030=@{9>3\302\356\274g\202\256=\356}\323;\226\214\227=0f\304\275\221\276\200\275\224\030\321\275Rk\252=\264\033\326\273\225\200@\275\376\3251>|\212D=\260\024\003\276\332s\312\274\207\237\333=@ \037>K\273\224\275q\364\252\273\337g\210\274\256y\250;\035f\004;W\346[\274\326-\343\274L\345}=x\036\275\274\236\205\244;M\267\224\275<_\233\275\016\345f\273\222\026d<\346{\207<\200\0172\2757\306`<2\2709=\036\221\200\274]\236:=\r*\000\2763\022t\275\224\0016=N\370/\275\2444i=\355\014\315;jdR>\253k\t=\326\",\275\030\0218\274\303\227\253=7\315\r>\331\201\247\275\267Q\347;6\323T\274\314b\223\272\273\2359<n\252Z=\350\264\313=q\265\305<\361\334\324\273j\2113\275\030\371\202=d\324\304\275\372%:\274\373 \255=\205\251\000>\231E\025\276\355^\211\275\365\346\331\272\276\355Z=;O\274=\203-\257\275\365\024\341<g\200\035>(\311\237\275?\213\202;\026\210\364\273+\213\031>\372\202\r=\371\212\250=\332\n\005=\003k\357=\363&\r>\232\325\244\275\225\241&<\030*\031\275\330iG=\267\363X</\273>\274\t\366\364\274D\237\230<0\340m=)\260\337<\2013\315\275HXs\275\313\356d=\014r\232\275\223\320\364<\300\363\032>>n\177=<\275\376<\341&/=\371Z\020>\362\207\263=!;:\275X\021\255\275\347\330#>?U\204\274\236\346\276\275\205_\224\275\211\230\310\273\3751\336\274\004\353i\273\216q\023=\27598<\270\021\344=,\037\243\274\024Y\213\275e1\317\274\222\264\212\275\036\275<;\213g\\\275W\274]=\324\330Y\272O\2751<\323\026&\275\340\001b\2756\355J<5\344\344;y\272\017<\251\033\021\275%\255\225=(\334X<\215\364\250<\027\253\247=>Z\225=\357\331E\275z\317J<S\232_\275$\367\023=fp\037\275Z\313{\275DN\t\272\005U_=\241U3<\216\273\007=\256\312?=m;\365=\2769\360;*.\235\275\350\340\367\274?\374n<\200\332\037=~\331\207=\231\r\365<n\315\353:\024x5\275\252Fg=m69\275\216\027\354\272/@\203=!\341\322=\377\003\013\276h@\026\275\201\223\n=\037\t\275=\250\030\304=\210E\222=R* 
=B^\253=Tq\024\276\003bI\274`36\275\265\305I\275>\372\352<\350Q\024>\246\343:=\310\236g=^\307\313\273\331\260\002>\360c(;\242\355\276\275\235\205\226;\273\253\'=\234\007\000\275u\335\206\275\305@\325<\277|h<\335!\271\273/\354}\275\202\217\231;D\300\217=#\272\221\274\232\005\324\275\304\210\t>\222\3520=.\243\020\275J\222e<\355[]=,\220\037\275x\n\"\275\342!s\274\367\366\021>t\3555\275\340I\254\275Qn\026\275_\356\217\274\252\352R8\0108\320;\324\360\351\274Zn\361\2759uS\274\256\233\253<\373!\302\275\301-w\275ky\261\275\201\036-\275\272\002\364\273\225\211\354<\361L\022\275\327j\242\274\332\201j\274P\361\014=}~\200:\177\016\344<\023\007\272\275\324\237\177\274\240\375(=\"s\226\274\271L\205<\326\323#\272\341\035\306;\000\333j\275G\263\030=\372\261\001\275G\273\360<\360B\377\274\305=\266\273}\305 \274\256\021\334<\t\340\007<\300$9\275\344\033\211\274*\324\360\274\265\221D=\036\r\207\275a\033\222\275!\031==\210\303\353\274\212\232v=b\261\036=0\333?\274\374\020\247\275\216KA=gwt=\312E\236\274\216\034\007=\350\2431\275\206\354\300\275\370^\214\275x\223\003=\357\217\203==\224\270;~\036\022\275\227\350b<\334k0<\252\333\034\2765\224\220\272#\216U\2752W\313<e\212\275<\312B\252=\206f\332<\336H\r\275\376\037\257\2750\225A\275\'\014!;o\370\214\275\263\r\025\275\177e\202=*\307\301\275\264\2412\274G\3025=" + } + } + } +} +node { + name: "conv1/weights/read" + op: "Identity" + input: "conv1/weights" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@conv1/weights" + } + } + } +} +node { + name: "conv1/Conv2D" + op: "Conv2D" + input: "data" + input: "conv1/weights/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "use_cudnn_on_gpu" + value { + b: true + } + } +} +node { + name: "conv1/biases" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 32 + } + } + tensor_content: "\244_\214\274\003!\265\271\027c\204\274\025\314C\274Vm\315\273MD\0338B\311H;\240\214a\274\177y\354\273F&\304\274\344k\272;\007\217\217\273&\302N\274\246?M\273^\337\301\274+*\240\274\363\214\035\274\251>q\274_\014\300\274\210\277\320\273\t\027\261\274f\211\306\273j[\312\273s.8\274a\365\253\273(T\230;\325]\217<\240\326\252\273[\027#\274[\360g\273\326\326%<J\3175\274" + } + } + } +} +node { + name: "conv1/biases/read" + op: "Identity" + input: "conv1/biases" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@conv1/biases" + } + } + } +} +node { + name: "conv1/BiasAdd" + op: "BiasAdd" + input: "conv1/Conv2D" + input: "conv1/biases/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "pool1" + op: "MaxPool" + input: "conv1/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } + attr { + key: "ksize" + value { + list { + i: 1 + i: 3 + i: 3 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "relu1" + op: "Relu" + input: "pool1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "norm1" + op: 
"LRN" + input: "relu1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "alpha" + value { + f: 1.6666666852e-05 + } + } + attr { + key: "beta" + value { + f: 0.75 + } + } + attr { + key: "bias" + value { + f: 1.0 + } + } + attr { + key: "depth_radius" + value { + i: 1 + } + } +} +node { + name: "conv2/weights" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 5 + } + dim { + size: 5 + } + dim { + size: 32 + } + dim { + size: 32 + } + } + tensor_content: "u:\013\274\340\253\230<\024zq<tQ\014\275~/\215\273-\0240;\366\271\232<\203)\274\274\004\'\225\275\351\234*<\353\021c\274 \310\371<\363\377\013=\372\324\367\274\355q\216<\275\343\245<\226\312\223\273\372\3067\275\211\347\031\27583W\275}\003E\275\320\2262=p\\[\273\300r7=\233E:\275\224\251C\274\216\206\360\273M\236\014;\347cm=P\352\223\275\311H\276\274\211`\035\273$\335\371\2734\365\361\274<%\204\2740\347\r=Eq\274<\347y\340\273$\327]\275<\254\313\274O\310\224;h\330\303\273\217z\020<\360U\032\275\367\355\323<k\005\013\275~\362\344;\n\350N<\215S#=\310\224\n<\342\364\333\274\013\"\241=\357\231\014\275v\004\202<\026K.\273|\237\035\274D \024=i\204C\274\016\217\312\274\366\361=<\367A\337\274\2534[<\345k\031<\375\3445\275\\\2409=`\035\030\273k\350\016<\361*\316\273z\023K;ou\312\274\271\317\326\274\0203\217<N\201\277\273\262\2540=%\242\221<,\346\027\275\035\321\234\2749\304\247:\027\224\263;2O\005=Q\315R\274&P\234;\354\214\332\271\202|\310<\252\313\017\275\231\247N\274\264\236\236\274\036\234\304<-- \274\306\354\014\273\033\025W\274!\274\202\274.L\372<\377W\n\274\232v/\275;\001\200;7\035e<\220kF\275xY\210\271\263\270\365<\361a\327\274\342\223\021=\202\316\242; \311.=\0064\027= \020p\2731\240\240\275\213Y\277\274\356\2245\274\264\376\013\275\311\330[\275\316;\360<a\310\232\274m\351\365\274\"]\243\274\345l\275;#\233\262\274\353\217\016\275\007\370\023=\317;\301\272>\322\224=Bo|\275g\344\263\274\267M\316<\005V\235\274\226l\003\273\001\033%\275h-^\275\372\304\377<\314>O<\033\360\210<)-\313<s\264\324\274#\021\035\273o\200\254\274\022PH\274\302\321\353\274\0051g\274b\237\002=\360\216l:&\201;<\212w\274<\276\013\037=Sm\037<\004\244\257<5\215\016\275\374\363\336\273\2009U\274\025\031\\\274\345\243\030<\260\203\375<\004\243\350<\005\276\274\2746\0306\274\322uH;U6\242\273\013\301\257<\252\360\375\273\202\250\335;\016\332\234\274\005\3508<\207\367\271\2711\277\035\275\234\364\233\273.\3372\275\nD4;lO>\275\267\350\336\274\321\227\3429\246\253\354\272\355\373Z\275\'S\016=\200!\266\274&\247\233<v\312p<\272\265\336<\366\365\346<\263K[:\010&w<\021\307-\275\303\277 
\275\014GN=\206\263\303\274\001\0074<\336\231O\274C&A\275\330E\364<W\215\006\274\020c\350;oM\214=\177o\331<G\237h\275D\247C\274\226\002\227\274\026\212\355;n$\220<\006\036\321\274\262\305\210\273\350-\264<\274M\350<<\'\221\274\t\270\034\274\377\264\221=\352~c\274U\027\215;2^=\275\207\315J\274\345<*<\241n\032\275*\265N;\007\241\370\274G^\251\272\303{x\274L\275\027<\246\375R=\210uR\275\354\330\313\274\rr`\274\366\256\241\274\262\247\303\274\'\"O\275\373\373\344:\026\334n\274\373\021\275<~\323\254<_d\331<\000\371\261=j=\364\270O\010\231<\270\212-\275&\336\221<\355\357\261\273\356\321\323\274\341\331\254\275z\242\245\272\373\353@\275\320dt<\t\267\355<\362\2348\271L\266n=\271=\031\275\301\002\216<\336\266\024=\243\270\334:,r\203\275\3603U\273>t\235\274\274#\322<\356\230c\274\345#}<\023\263,<(\305\014\2758\030-:\323}\214\274\344H\363<9k3<}D\'\275\310\334\273<\033\035g<\311\355c<\306\364\306\274\000s\327;\233\220\254<\350\333\001\275\204\370\312\273\365\343\313\273\300\355\026\274\313\273\235\273W\031I<\337\362\217\274\364<5=\233\261\006;\253J\036\275\322\032\313<\032\033\212\275\224+\263<\016\243\007\275E3\304<d~\000=\232\274\350\274I9x\274\245\"\025\275\333\305+<Q\223\222<\020\274\241\274\245\231\306\273\305\347A\274\371\301\254<\306\0149<\371\303\206;\3768%<p\005\271<\221\206\305\274\212\230\n\274\205\215u<\273\352\254\274\251i^\273\244b\223\274x\335\022\275:u\225\274\026\332\217\273\030\217\320<\221E\232\274\332\017\232=\306\360\203\274\370K\330<\267\245\n\275\335\345f\272\003\213\313\274+}\031\274\'\037\203\274Hz\214<\260\314\023\274)\\\023<\370LJ;\ry\350\273\026\301\030\273;\274+\274\207\023\366\274\355\3418\275\221\343n\275ESj=\177\327d<\371\302\246=\232\277Y\275\034O\236\273\347\366b=\217\022\373;\021\326v\275\005u\032\275\216\367\340<\366\005P<\374J\221\274\214\377\362<\301\253#<\251\246\203\273)\305/\275\226@\355<t\034r\274\262T\347<\030\024\343\274\271Z\217<\317\021<\274\210\276\t\275|\206\272<\222v\001\2759\236X<\354\213s\274\020T\024<\257\271\305\274-F\001=\334\264>\274\t>l=\361\377\331\274\242T#\273\370t\334\274D\215\237\274\'-\036=s\274r\275XD\202\275\260n\240;\264\005\211\273\215\352&\275%\223b<\264\232\215<\240\321D<\235s[\274%\234\331<\274\0239\274\337\036\332\274x\005\240\273s\334\222<\024qP\274\346\017\177\273\036\001\266\274\017\021G\274\313=\367\274\351J\030\274\242\252\347\274\221c\035\2740XN=5d\017\275\230\250\002=q}\035\275\234\026:<;\246P\275\3479\363\273\2125\324\274\017\342\033\273\033\030\203<$Ua\273l\267\216<\217\250K<\000v75\254\367#\274E\3243\274Dj\245<)\201\037;\315\245\217\274W\016\226\273\323\r\204\273\275\323a\274,\366\317<\324\235\010=\300\332I<\321\210\246\274-\222\260\274\0315,=!Y\004>\010dc\275EOu\275\225\254\017\275E\007;\274\274f\013=!@b\2754\017\222\275\323\231\037;\357+\213\274\272\005D<{\213k\274\030\206\303\273\222\3539\274K\227\357\272\236:)\275\277i*=\031_\330<\003\\\212\273\354Y\307\272\230Y3\274ec\315\274\037j\270;e%\201;\365\345\207;E\346C\274\273\216\010=\275ID<@\236\324\2744)\342<+\376\2419\ta\000\275\021\304\277\274\275*\247\274\305)\232;\215F\001<\212\177-\273.\216-=\363\254\"\275\344\315F<\361e5=7\273A\275\364\273\305\274sV\215<t5\336\273\262\r\025=\310\353y\275n\035+\274\332V\023<\340.\210<\221\211\037\274\216\265\222\273\211~\342<\306! 
[... binary blob omitted: escaped serialized float32 tensor data (embedded model weights) carried verbatim in the diff; not human-readable ...]
\275&\331\242:%\230\"=\023\316\177\275\303\203\234\273\246\215\264<g\n\225\274\023rO<\316\316}\274_7q=\260*G\274\313\2514\2747b\341\274\267\351w=M\300\372\273\332\355\344\274t3\201\274\024!\235=\310\357E=(\324\246\275\373\214_\274\211\213\205<\256\242.\273kH\200\275&g\256;\214\374\234:@9,\275\366o8=#_\020=\"zc=\302\332\305\274\300\253\336\273#\001\210=\215K1=\017\315\346<3\363\222=\313\340\343<\276\243c;\226\231r\275S\005?\274\261\362\233<]%p\275h\346\227\275\211\013\000=\364\243\004<BY\265\275_\205\356<\362\300\t=yq!=WA\033< r\265<\001j\003\275.J\215=)\357\272<\254\t\037=\252\366b\274\034\345\211<\300\231\271\274\243\031\213<uM(\275\\:\334<\365\2441\275A\274q\275\200\234\001=\370f\325\274\273e\034=\265\022U=r\255\331\274f\351\037=xz\233\274\037\352\214=\246\265><\237\301\376\274n\255\034\274\206.\020=a\270}<._\211\274L\215\232\275\302B\272\275Ny\307\274\324\3175<\322P\331\275N\243m<U\252\250\274bE\212<\303\354\013=\353\343$\275c\312\352\274Rh\352=\373s1\275\226\217A=\033\344\034>\357\265\323\274\270\273\312\274\277\223]\274\031\305\032\276\022s\203=\205v\006\275.i{\275\200q\232;V\031\215<\375\361w9H\214\301\274\354=\"=|\355\312<[}\231:Xz\254\274Q\226\003=\266\321\341<\031\235\017=\321\217/\274\366# <B\016\331\274h\000\322;\\ \221\274S+\305\272\016) ;2\265&=\177F\341<FV?\274(\260\210\273\366X%<\211\032\266\274oY[\274\301\223\355;Z\260U<\234\202\324\272\352\231\270\273`\360\n=\263\261\240\275\351\355C\274\366)\023=G\227\305\273\213\206\273\274~\3220<\317\016\260\274Ybz=\361\010b\275I{]\275\211T\255<\324o\014<\363\221\277\2747\001\313\273$\276\303<\347\2677=\356\256\307<\352\006!\275rkM\274\001\362\030\274\335\252%\274\356\262\324<\337\034\360\274A\267\007\275\2730,\273\360\230\031>\210H\275\274\356\334\\=\'\022\005<?-,\275g\243\245;:\362\023\274Z\016\301\274\305\305\233=S\232-\275\340\340;=\231\266\266<G\327]=\026#%\275}\221\005=\334\2056\274xo\214\274\234\217\343\272H\"\262<\3520\007=\277\361\030\275\344F[=\343\014\3329\222\321\034\275z\203\244<{\271x<V\022\321\274\365\t\211\274\007U\230\2748\033\377\274$\372\307\274\240V8=\253\024\204=\253&&<\010.\277;\324\0333;z\371\250\274##E\275U@\\=aeS\275\226\215Q\273t\236(\275\213\023\221\275\224\026\245<a\264\350\275\006\270x=\370L\333\274\356\326\256<_E\357\274\232f\250<\367\216j=\004\375\306;\020h\237<\177wS=|\210\243\274a\031g<X\221\323=L\034Q\275\247\363O\275\023\275@=[\032\001\275\272\342\220\275<v&=S\320\000\274;5\310<\025$\206=\370\222\250\274f\332\251\272\305(\203\275z:p=\251!A=Jr\304<\263n\343=\271\357\224\275q\277\321<4\013\337;\006l\027\274V\225\231<A*/\274}\344B=I\251\265<\356m>\275{\233\020<.\023\347<\246\035\275<\033\223\224<\024 \254\273\326\353-\274\232\'n;IH\274\274\267\000\035\273T\3470\273\310\001,\275r\232w\274\207Z\205\274\002\337\022=\310\355-\274\354a\325<X/*\274\234`\037<\017u\262<\277fJ\274\242^\022<\037\233\356\274\330\034\344<\016\036\014>\034\361N=\202+\017;\326\210\344<\377dw\27533\'=\324\324\266\275\224F7\2747\3532=s=H=\367\216\026\276\235\016+=\024\321S\273\206\315\245=H\374w\275\316\307\254<p;\227\274V\261\304=I\333\021\275o\277\330<\243\244\340;1\216{=\200\255\254\275\221\313q\2754{\005\275e\013\362\274Gr\t\275E\032O\275\256\241\261\275\227\241\202\274\372D\311=\367\204r=V\301\276<\240<&\275\313\230\365\274\325\"\237\274@\016\354<\004\377B\274\324v\326\274\177v\356\274\370~\264<\313@\027\273\302\367\245\274\201\347\235;\016\303\023=P(\032\275;X\202< 
&=<\344Os\274\225\367\212<b\266{<\203|\013:Z\304\376\272\023V\264:\010\202\243<\3620\255<34(\274D\301w<\372c\230\273\3328\005=m\216\207:\007\013,<\224\017\031\275\003\267t\274\007\375N\275\261[\320=\217\272\231\273\313\347T;\014\344\335<\347@\267\273\016{;=\363\024\332;2\241\240\273\236\345\264;8l6=I\225d<f\037B=\025\361c\275\005o\277\274,$\005\275g\257a\275\333=\t<\002\214\027\275\230-\347\274,\301\210\275\004\305\322;\353\322\n=\317\324l\274Q\314\222<\205|\323<\332Vl;\217L\215<\372\233`\275,N\037=\021\236\237\275)\305.<\311\247\020=bb\034\271\276\220~\274<\375\272\274\242s\033\2745\321\203<\000\260\201\275\35242<[D\232\274\005G\034=.\314U\274wQ\203\273\201\305\343\273\225\032\333;\242L5;\\G\365<\273\310\217<\343\322\007\275\2761\213\273\2611s\2739\201\305\274k\201\276;\233]\006=\312@z<\325\272\016\274\276$\246\271N\201\250<\316n2;\'\2209\274\326\371\204\274(J\025\2750\rI\275\216\211O\274\246\371\274\274\252\262\305<\262p\202\273;\250\275\274\020.\024\274\273|\221\274\322\324\367<\227s\017;h\005E<Mu1\275o\324\376\274{eT\274`\347\3116\332\266\010\275\272\\\363<$@\270\2730\034:\275,\363[\273U\2210\275\215O8\275\253\353\273\274\235\267\345<N\264\000=\321Bm=)\311\341\2730o\345;\377\337\275<\242t\r\275J\247\271;\2337\326\274@\304\216\274-\325\317\2754\031\220=\207_\315<\\\325D=\341\350\200\275\217\332\025=\004\303\207=k\224\010;\220\214\346<\362\034-=?\257\257=\216\036\014=\177\202-=\3237\232;DCz\274\361\271\211\274\031\241\252\275\243\211\023=\331\276/<T\242\003\275n\261{\275\244\220o=\005\0347=b4A\275\272A\026\275\316\270\257=\301oV\274\360(\031\275\3154\231\275\374\334\270=\371\216C<\217h\t=\225\n\272\274\021\372\n=T\270\356\274y\"\177=\234\350\277<\337\225\313\274\267f\013=\310w;=\017e\017=\035_;;Z\035\264\275\220\372\250\275\201N\300:\311Ax;\347\032\265\275e\327\356<\341\255\342\273\023$N=\354\351\306\274\237\3439\2751\252\202\274\236\014\255=\376\306\217\274}\320\336<\306r\224=\242\357\272\272\013H\217\274\031\316\203<\036z\001\276\037\217G=\202\370g\273\251\324\201\275T\300\250<b`$\275)\236\353=\1777Q=\252\244\256\275\340\221)\275:\357u<B\260\334=\"\201[=0B\245=\302\240\300\274w\031y=\335\302P\275\"\345A=\233`\245\275(\346\021\274:\241\376\274wl\362\274\316\224\365\274M\346T\275\224ZK\275\026\260\234\274Yw\237<!\205\365;.\215\376\271\374\251\3328~\213{=9\340\212<\225Z\200\275\201\303\213=\244\3176\274\224>L=\266\220\023=\240\025\017\275\"C\265<\035\356\334<\352@s=g\t_\275Me\021\275\202\202\001=y\225\267\275\353\340<>1\320\246<\'\242\"<\226\254\032\275\324n\026=\253O\224<\352\033\014\274\261\377\314\273\350\273,\274<\004&=\341\373c=\257\246\022\275\2569;<\250#\021\273\321y\215\274\346\r0\273G\021\202<k\352\036\274\361\260\217\275\366\253|=\234\237\000=\236\274\007=\332U\"=6$B=\360M\215<\231H\021\274\253\211\214=4?\327\274\007\365\'=\342\037^\273\374\257\233\274\177&y<\372\337\024\273\241\220\235\2746`\320<\035\254\020>\3639\210\275\034\r1\275 ::<\302\275\234\274\2103\225=\276 3\275\227VU\272\030j9\274\326#c\275\266\177\235\271\314.]\273\310\374\023\274\352\251`\274\216\274\220\2752\266\373<E\234\220\275\255f\214=$\213*<FO\207\274\321TO<T 
S=\025\302\241\272\274h\214=\027\3719=\247\316\230;\371\242\251\274KH);\323`\331<\364\270\371\274\365+\205<\222\224\025\2748)C\274\251\234\005\272c\333\333\2742\306\372\273\\&4;\212\2262:\343jd=K|\227<\252\276\321\273\216\204\347\274^\033\031\274\224B\215=\225\026\005\275J\205\333<cg/<A[Y\275\235\307;;\302\0372\273\007e\001\275\320!\360<\325\223\212\274\355\200\223<_\'\270\274\216S\243\274\201~O<\265\223\006=d\320\252\27347\250\274Hm\030\275\274\330\366\273\323\211\n\275\326I\010<_M%= z\007\275\'\253\263<\\\265\272< _\006<:)?=(V\037\275\235I\316<\027\357\374\273&k:\275\324\251\n=i\335T\274\r\203M<\242t\357\2745Q\267\274\201\236\364</L\315\273\312d\344<\217\007c<\270\317\306<\331\363\234=7\212=\275\202\216\366\274\325\361p\275S*\004<f\014\267\273X\211\035=\316\341\026;\364!X\275F\202l:\030r\322;\"c~\275l\323i=[\376\315\274\305\267W<4s\005\274\207\010\010\274Q\372\234\275\377PU\274f-\037=\002\3349\274sv\035=$s \275i\222\206<\262m\032=\227\013Q<\327\222I=\365\361m=\370\236\213=\007!\241\275\222\010\"=\215L_\275\026_\353<B\030\005\275\237\305F\274\010q\202\273\306\262\216<T\001\022\274\231\356\260<P\2341\275+\260\t=\020\0050<\3272\266\274\337\227\376<b\235\034\274\032J|:\026\026l=dB1=5\317\356\274*\261\020=\323my\275\256\261\220=B\366x\274\026\217\005\275\200\003\010<e\243\022<\264Cz\274k\333\001=\216\325\364\273\240\233\246<\250\241\350\273\312\336\236\272\325\365\021\272\t\256?<\305\231\332\273\023\214\330\274\367\347\273;M\243\243\275c\342\034=\265 *<Kb\t\273\233f\303\274\232R\353\274\000R\207=\373?\332\274\241\337\265\274k\026\276<n\242y\273%\204\344<\351\270\301=b\242\215\274\375l\247\275\247\023\223<\352?\307;Up\357\274/9f<\"\372\017\275\204\372A<8\244m=\326\002;\274\217\362\033\273:o9\275y\315`<\354\032\202\274\027g\036\275\252\023?=\313\333\306\273 `=\274q\016\236\274\254\2619\274Y\311\267\273\350\275\001\275J\320\365\274\360~\272<{;\272\274\363\rG=\221\013\002=\300\327\036=\212^\020\274?`\270\2735\271\255<\024l\375;\310J>=x\033\310<Sw\253<<\317\256<@\006\323;\301\317\332\2740\277,=\"W)\275p3\r<\037\262.=\203\273\272\273\375\351\240;!i\203\274DHq<\252\004\354\274\205\350\253\274\"},=\222\212N=\271\275\354\273\214gU\272\014 
\r=\241pK<\275\3573\274\030\035\351<x\231\262\274^\326]=\334\231\225\273\252\226\251<R\201\232<\312\247\245\274\342\202\313<\0370O\274D\315s;\251mH;Y\211V:\252\243?=\"(\316\274E5\025\275\371\334\000=\016\333x=6\255\007\275\366n)\274&\314\207;\026\360%\275\305\340\332\274G\3424;\204e}\275Mv{\274P~;\274\211mw=\276\313\374\274\344\337\334<@\362\207<7u\362<\226\336\212<\216.\212<e\006\003=`&\004\275/\362\261\275\374\2628<\036R\032<u\315\267;\276\034\266\275>?\353<3\377\356\274\252p\246=\325g<\274\007W\227\273`\336L\273\207\022\204;\177N\254;P\312\302<6\367g<\344\262\202\274\354\224X<\252\315\363<\343v\027\275\223T`<\006E\234\273\2659n\275\364\006\004=\016\275s\275;\315?<+p2\275\275\331\375<\363\252\023\274\364\222\305\274D\237C\274\312\344W<\r\226\200\274\001\366}<\214\003\266\272\032\303\255\273\226\274\245;EX\225=\201\266\223=/`+\273\273\216\301\274z\263\361<\323\264\210\274\314p\204:.\n\273\274`\0070=\220\017\213\274/s)<\220\327\242<\262\313H<\2347\355\274n{Y<k\261$<\235\002\022\275\351l\360<\357\375,=\340\\\355;\357\215\206\274\350v\260<\006\376F=\235\341\234<1\025\352\274\210f\365<P\232\233\2745B\367\274\366u\257<\026\220\025\275\355MJ\274\222{\347\273\242\277E=\372o\342\2740\352\3428\247_J\275\252\rv;2\263\203\274#\226A<\243\356;<\266\221\230;P\223m<\341YH\274\240\026\244<F|\324=\2065\223\273Aw\351\273\234\332\362\274\320\344-\275\256\207\377\274K\202\260\275\354}m<>\244\215\273\036W/\27524\020\275\243\234\253<S \241=S\310&=q1\356\274\t\020\223<s-\353=+\332\220<\262\214B\274\316\316Y<\3108\017\275\177\303\362\272\233\"F\275\353\255 ;\033\264\372<qg\261\274\n=\300\272x\240\273=CIP\275\000\002e\275d7\322\274Dy\246<\271\351\323\273\\\341\023<C\364g;k\251a=\207\316\016<;t\230<\031\272\217<_\t\2449\340\032\315<\332\306\336\275\033DE=\354\225@\275+e\307\273\340\323o;\360n\027=\310@\326\274\207\320\304<\310qz\274\224\330a<\252\211\022\274\255\276`=d\321\361=\031\221\005\275\337\200\241\272I\034\212<\325v\212\273N\331\236\274&M\364\273\242\353\227\273s\257\r<\242\243^=\256\031F=\377\270\225\274\213TR\275\277\351;<e\0074\2738\n-\274\024\242\231\274\2242\220\275\204\376\255<\256\306\035\275&\256&\275)^\000\274\243\373B=xE7=\356\034\021<\315a\265\272\255.\007=\313\340\235=E\337t\274*\000\372<\026\277s\273\2223\025\273P\364\004\273k\017y\274\257\344\205\274\"T\023<X\255\275\274R\335\025<\013\177\001<\316\262\000=\'\2602\275\242%\302:b\257\216\274\326\225\304<\370\000{<!1s\275\326w\253<JW\241<\375\311\231\274\313\301\033=\225 (=x\000\214\2730C\t=<\ni=Sav;C\034\305<\243#\010\2754EP<\277{\346<1R\t\276K\252$=\221\201\321<\307\270\233<\312\021m\274\0336\365<\344\327F\275P\334\351=\337\023 \274UG\220=5b\220<\351\016\321;`\005\032\275\n\267\275<\353\252\322\274\214K\025<[v\n\275\372\002L\2757G><>\314-\275?2\022\274$\333w=\030e\260\275>s\004\275\326\304.<\372\362q\275\344\334\354<\375ei\274\351\312p=\365\303\215\2746\337\221<\217#S\275(\023\301=\326\225\n\274\222\264\037\275w\314\330\273T\005\221=\270m.=N\201\243\275Z\360\277\274\251\200\343<\340\273\355\272\223\241&\275K\027\240\274\204\0345\274\264\t\000\275\035\014\214<\302\3103=\373\321\204=\216\237\021\275#8\317\274\200\022\254=\002\327[=R\303\034\274\3632\320=YvH=M\r\177\274TQ\265;85o\2732\305\215<0p\202\275\241B\204\275 \343\365<\250&\230<\372(\014\276{\301\234=\331l\004<\267H8=ywp<4\231\354<\030f\022\275\237t\263=b\362\301<_\364L=\254C\344<\027X\366<3!\276\273\023l 
\274\002\325m\275\034}\035=\233\024\226\274\304!\231\275\357\006\230<\037\275\024\275lP3<W\343\270=9\315\347\274\276g\"=e`r\275\316\203v<\257s\221=\355\202\370\274:O\307\274S\224\313<\357\372\330;\312\215\r\274\366Z\205\275\303\300\274\275\244-\210\275\331*\231<\317?N\275\307\3247\273\335\360y\274\001aK=6D\301<\341\252\354\274\234\236\250\274\353\014\316=z8\224\275\344s\032=\220\034m=y\204\275\274\265\313t<\213\032J;1\021\222\275\273A\265<\251\177\273\2744\030\217\275\025\372\215\274\201\213K;\260Q\311\272:d\260\274~\370\202=\007\350\261<\376WR\274\001\'I<\337$\275<j\304\212<\254;\022=\2170\350\273\243\010X<\363\3563\275\237\334X<]\n\251\2746A\331\2734\371\014<v3\260<\221U\343<?u\310<\274\003\300\274]!E<b\014\270\274:P#<xS\275;\233\021\271<\023\264\010:\241h\2369]]\002<\210\207\203\275\360\240\013\274\2708\333<\360_\260<\2433\232\274U\263-=\025R\273\274\\\343`=-T\032\275c\340=\275A{\361<\333\322J<^\352\001\275L\231\306;O\317\226<\320\321$=\277`,=Q]\317\274\257\216a\274X\270\234\272\024\026\247\274Ph\277<\177\367F\274\2200\206\274*x\360;7g\033>\336I\020\275&&b=\234\301\317<\227\213@\275c\016x\273\201\036n\274\023\023e\275\232\016q=\r\216F\275E\021\305<\'\026E\274G\216\231<u6\002\274\327:{<\013<7\274J\361\247;\270e\006;_\203\330<\2776^=\310\277\024\275\267Z.=\002!\201\274\276\276\272\274\221\020\014=\r\036i<\267\3105\275K\035X\274\'\3709\275\224\022f<\211\210\007\275m\313<=\014\231N=\r\217Z:\311\306\234<\217\360\005=o}~\273\004\3165\274x\203\231<@;]\275\337\232\365\274\0050\031\275\265\251u\275iX\017\274\240\320\314\275Y4\024=t\371\277\274G\331Z<5\341\213\274\005J\327<\r\"4=(\347\327:\316\\T;\003\221\003=\354\352\006\275w\250@<!\025\007>\224~S\275K\326`\275\'\333\201=\t\230\266\274\255\035\027\275\346\300(=\013\307\025\275)\021\332;\037\003\326=7@r\274\000\203d\273\247e\237\275\301\216\301<\270\\\024==\226%=NC\314=\031\020\227\275\212P\225<?m\003\272Z\301\r\275Y\247\032=\033\273\257<\365\264A=\267\235\\<\203\017\334\273\036I\321;\250\030\010=\251\276\203<\236<\337\273\351S\260;E\341\005<\273\223\320:\014Y\322\274\337\271\341\273\374\277\212<<OK\275\261\247\224\274`\217\001\275\276\300~<\223\326(\2742\177\202<\365\246I;&|\213<w\020J<\221\001\312\274+\000\010=\221\224b\274\377_\n<K\326!>\307\n\240=\023Z\014\275\354|i=\374\307X\275\222\257B=\253e\240\275\340\335\275\274;\363\032=::\241<so9\276L\310\247=\267\370x\273\307p\270=c\355>\275\237{n\2740\202\300\274o\034(>\237\336\005\275)\032\261<U\204W=\351\341\272=%\235\235\275b\275V\275d\004\243\275\231\343S:\022\310\274\2741\024W\275g\032\022\275\357\317H\275D\021]=\337\n\r>\276Z\267<\372\265\373\274\232c\000\275\352\345\257\274\032\'\024=\014?\177\274\376\031\337\274\202,C\274\260\240\333<\344\216\345\271\251\326I\275\203\3020;\265\212\350<\257\310\034\275x\332\377<2\334\200<@\324S\274\250\200\235<\241\215\316;\376\255\003<\255\331\315;@\247\243\272^Z\001=\334\207\"<\213\372\243\274\005_\212<D\373\014\274\355\271\003<\003H);\245o\014\275!p\017\275\222\202}\273J2\233\275\260_\322=\026v\304\274U\336\024\274.\rl=\253\205\352;\264?c=\325V\324<\217\332\026\275\267n^<\207!\234=\027\311+<\200\253\371<V\354T\275t\253\314\274q\232\264\274\340.)\275B\271\2329\252\025\256\273\354 
\035\275\364\240\253\274Tr/\275h0\215<!\340\305\274\031S\\<\022\000A<\246)\241<\020\243\357<\0316\323\274\036\351\262<}\216c\275\352\352\237:\013\004!=\230\372\024=\023\035\234\274<b\016<K6t<3^)<\nAN\275N>?=BJ4\274=@U=e\n\014\275\371\027\367\272\262\373\377\274t\245\204<\361\021\354<\320/\247<J\240<<\036\255\r\275\375\367\275\274\336\345H:\230\216\232\273\233M~<`\257><\366\215f<\\1\326\274H\335?<\036\252\031=NsQ\274\254\243F\275<\241,\275\005}\016\275\3753\240\274\033H\r\272\242\231\221<\306\301\306<x:\236\273\002\206\034;1\225\220\273\364^/\275E\021\266<\324\r\357<\'\2128=S\341@\275\252X\236\274\r\315*\275\260I\213<(\215=<s\277@<\036\367\036\273\216\006\000\275O\204\312\273\257\227)\275.\270\277\274\250\003-\275\300\210\210\273\223\026\014=x\014P=\317f.\273\274\337#=\2413Z<\277\275\235\274\260\217\027<%\006,\274\262B\317\273\203\003\334\2757\221\235=\334\304\177\273\020\240\231<\216\351)\275>\3226=w\377\236=6\377\311;\215\035$=3\205\263=\007\225\006>/\373\017=\240P\020=\246\235\030=\322\311S\275\270.\244\2747$\273\275\274&\371;\323Ji=\2648T\275\344\t\335\274E\314\225=H\324\016<L)\212\275\375b\222\275\014\020\235=\241S\316\273\244\024\301\274F\3158\275\337\030\261=\366\t\212= \022\217\274\3434\376\274\3526\014=Z\205\271\274\367k\207<1\213B=\361\014\206\273l\317@<\237\256\310<LD\315<\020\370Q<9C\374\274J\361\234\275\355\266\027\275\351\256\334<\262\355\255\274C7\217:\257\330g\273f\036I=\233\355\213\274\254\360\327\274\000s\266<\237\267v=\2573\016\275\300\337\004=3\023\006=\020C^<\336\220\215<|o\334<\'\314u\274\2413\246;\013\017\013\274\341\365P\275\035b\323<\362\360\362\274v\356\353=\236\236W=\373,0\275\307\036\306\274\027\310h<Z\277j=\2468j=\re#=\035At<V\251\256=\377\321\353\275\0031u=\344(\375\274\037*\234\274\372\271\031\275n_\304:\355[T\274\177\301)\275\030{\364\274Q\346\250\274V9\327;@\364\241<F\rC\2751\017\353<f)\317=\364!\343;3\032[\275\237\322\200=,\373\210<\023\025\353<\307\005\215<\350\373\022\275\307\206\264\274\307%];\217\302\212<\335\206\r\275F[\027<\236\030\035=\323\251\311\275m\347\031>k\275\302<:\255\247<_\340\306\274\2028\336<\356\223\200<N6\221\272\2557\223;\350\277\027;5e3=\005\"\227=\307\024\206\274+\322\274<\213J\021=\010\241O\275\302,8<;\355\364<\330\253-:\271\240\177\275\3017\321<#Z\262<\030\231\333<\210\253\232<m\340\024=\210N\363\273p@\n\275\277\264v=#>d<\204\035 
=\\\037\325\274\rE\362\273\312\026\003;\251\025\306;\225(i\273T\022O=T\021\272=\254~V\275>!\r\275\304\344A=;\375]:\201y\306<7\032\276\274E\014Y\274\356\0234=\013\206\214\275\0077f\271m\036\257:f\t\214\275a:\360\2733\3717\275\362\035\216\272\362Ls\275\024\311K:\3020\203;\33556\274y\271n\27455\210=\017|\001\273\333\326\263=\314\373\024=\232\216\201:~\372\303:\333eL<\217\237\255;\232\004\224\274\240K\226<\356\356\346\273\006\'r\274)\010\304:e\321\3548\220\214x\273\273J\034<\203\216\200;\370\271~=$\341\325<\340:\325\274\260\t\203\274\275Q\307\273\210\237\215=\271$\325\274\367\002\005=\016\267\327<\032Rl\275\240o\t<wNp<\032\331\034\275\322\3009;Hv\036\274\177\222\353\273<;\017\275\2325\275\274\010!\201<\347\265\236<\273\250\330\273)\323\3469\300$Q\275\3174\202;7\227\351\274=\213\025=y\005\261<\023m\256\274j21=\217+\030=\257\341\311<#z\216\274V\240\362\274q\2563<L\267\220<\213lP\275\267\324\'=\036\327#\274\323\0043<T5\353\274U9\210\2748\215K<\345\372\316\274\200\370\207:\024\271\370\272\232\021\034=\027\365\221=\214uP\275qg\217\274\002\206\020\275\206\252\220;<\243\217;\246J.=\036;\367\273QW\001\275\353\350\246\273\273\250u<\306v\214\275\025Q\202=\340x\275\274\000?\247<!#D\274\257`\372\273\350\000\377\275~\245)<\367\364\356<\344\331\271\272}\324\016=u\370l\275\000\260\235<\361]\212=\231\306J<=\245\032=\3260@=e\t\020:\336\177\213\275)s\373<t\342\324\274\033\375\267<\342\032e\274M\017\277\274\331q\325\274O!\346<\037o\326;\274\240{<`\347+\275\014{?=)\3617;\353\220)\275\316\244}<R}\025\274G\243\272\272x/\204=\220 \013=K)\373\2742S\037=NNE\275#A\244=M\022>;j\311\017\275f{\322<-p\276\273\363U\225\274*\365\010=\252\343O\274\316\036\370\272\214\330`\274\301\226q\274\237%.\273\344\370\276<\314\337\346\273n\001\222\274\252\362\221:J\353$\273\030o\000==\\ <\014G\014<\212UN\274\244>\257\274K^u<-\026\007\275\243\323\241\274n?\307;\307\316\356;\332\330B<\344\036\214=\206\352\025\274\364\302\177\275d:,<\354\215\214;2\244+<\333v\377:)\363+\275\215#\206\273\017\312P=\2653\020\274\222\201\213\274k\243P\2756[\027\274\200\254\377;HYD\275\214y\237<\327]\214\274\302\3032\274\237Z\215\274\251\016\241:W}D;`\274\257\274\364\326\310:;,\361<\037\360\017\275\204\020\r=;\022\"<\212\226\010=]b\024\275\347\217\005<\344\365\215;!\347\207\274\013\037R=>\030l<\357l^<\321xL<\022[\271;/\346\032\275\006W\035\273\263k\010\275\205\323\241;\026\212\003=7 c\274c\334/<W\315&\275R]*=D\357\257\274s\367\326\273\236@\247<%:d<q\366\200\274Wwu<3Bp<\036\020\353<\330i\240<\315)\r=\321\350\022\274\207\307\365<9K\312\273\036\006\362<&0\343<$<\365\274\263\302n<NN\002\275\201\263\213;\314\377\243<\354\327\275\272jn1=eVs\273\036\035?\275\276s\220<a\221\010=\237\017\331\274\000J<\274\005\323\274\273\2258\335\274\353\307\"\275\2630\333\273\357r\220\275l$\024\275\225!\275\274\031\241r=\321\021\"\275\037\203+\273\320X\013\275\340\213\035=\277\374\275<Q\314s<\034\010\"=j\'M\275\240eW\275\342\003\273<}#\233<\207A\002<-\270\273\275\025\374N=\376Q\214\274$\210 
==-\204\274\327\235\000;\231\344\210=\005\206\220;\312\336\242\274\346\227\233<\006\221\220\273\222\315\263\274\377\253\026=l\351\314<W(\242=u\345\362\274\022\014\231\273}\374\006\275O\211\n=\233\345E\275b#\257;\27433<\245\347\263<g+]\274\215\202\023\274z/\372\272\232\365M;\322(\214\274\277f\222;\026\022\330\273\no\177\274\345\3037;\311\271\204=~nI=27\377\272\304\334\335\274:\220\232<|\314\375\272\220OS;$k\016\275\276\351\004=\343\315\004\275\2523<<\247\344R<\325#\316\273\344\221F\274\020\375\204\274-\273\256<\021\372\344\274@\220\240<\232\026\334<\315\356\034<!R\342<G\232G=\251*==\367\'\244;\244\342\246<7G\022=\214\202\000\275\261\327\016\275\330S\014=\336V0\275H\221\251\274\360{\232\274\021\331\006=5\025\331\274\202\261\231;5\033\304\2749\240\360;\250\346\032\274\367W\216<cG\206<=\307\211\274\242\373\336\274M\036\233\273UB^<\314C\237=\362\342\277\273N+\245<\314\026\374\274H\273\314\274[\275\364;\213\345\254\275\237\344\302\274\257\031\203\274\245\237<\275\243\000\211\275\372\313\030=:\353\036=\0007C<p\231\371\273\"n\001=\256\373\327=m\037\250<v:\221\274\306@K<\263J@\274\225\031);\013\016\030\275Q\322\363\273\245Y\016=\037\366\312\274\233\260\345<\333\037\216=m\354\263\275\211\316F\275\215\t[\274e\362\274\273$\n\336;=\302(=\373 \205=\033\306\234<\237\361\014=\274\232\351\273\226[\233<\371\236\342\273J\316k\274\000o\031\2751[\t=\310\355\021\275\005\237l\274\023\362\013<\276@\334<\341I+\274\t\300\016=[\005N\274#3\231<\230h\241\274\n\2434=\325\314\267=Qa\304\274R\266\007\274+\277X<w\016\364\272U\'&=\276L\207\274\253\301\253;\326\031_:Xq^=q\337@=\215o\341\274wR\315\274\311{\r\2745\207\253<mg\251\274\021\234\014\275x@\226\275S\204\272\273\200\002\014\275\363\022\272\274\366{\230\274\231\027\222=P\215\325<\350d\032<\255\337\276<\353\3702<\366\354\235=\275H\306\2749\243\371;\267\021\354\273\275\230\341\273b\376\001\274\312\253\026\271\236\361\253\274b\035\343<\277D\316\274I\201,=\302\025\033\274^\035\356:\2347\374\274\305r\306\272d\265S\274\274Y+:\212J\n<\204\2406;,\360V<u\336\364<o\303\240\274\220\010a=\240\232\341<M\272\370\273\245\310\036=7\341\000=\3179]<\2224E=}\334\370\274\013\244\253:\232\244\036\274\203!\264\275\375\000\034=M\306\254<\340\317Q<\254\036\301\272\373-S;6H_\275\014\004\232=\261\1770\274\237\224\203=\352H\'=Q\t\204\273\223\335~\274|\003\177<\247YW\274\255-\356<\336\262D\275v\330\007\275\005\037\030=\315M9\274\330\211\244\27480\035=4\271\225\275\206\200\365\274\305\276\034\274\006\203\017\275\353\222\252<\035{2\274z\244\300<\374\237\t\275x\223\237<\220\213\316\274K\336\241=\254\270\344\274\000t:\2752\362\225\271\t\206\r=\344D\037=c\265D\2755\364\234\274\365\330\215<\n\263\005\274\232+a\274\007\247\310\274\351\002I9A\274\336\274#\222!<\203\243\370<\303\202\013=\220\026\322\274v\007\226\274sU\253=\340\026\353<\017k\242\274\250>\342=4\274\035=\r\025\311\274u\016\271<0\316z;\251\273\001<\305\270\352\274\202Z^\275K\313\204<\316\277\207<\177\253\010\276\354\307\253=\257\365\n\2739Q\t=\343]\251<\272\357\004<\254\021\271\274c\210j=\000\350B;\226r\213=\271\236o=\020o\271<v1<\273\345\255\253\274\340u$\275\242\020\342</\255\010\275%\037M\275K\331\'=7X\'\274\274=\337\2731\337\215=h\013\333\274\000\2261<61=\275#dk\274\3501\017=\322\014\266<\270\230\342\274\245q;\274\276\017\353<A\3046\273)i\354\274\"\374\254\275\225r\002\275\335\0068=\322\"n\275T\274\037;\005\016$\273\201\333\035=\302$N8D\210\312\273\336\0275=\315\302\231=H\243\254\275\275\3376=\005Y\231=\374L\211\274u\371y=scD<\\\315\275=,\205&\274.W\320\274\326\317\232\275i\371\230\274\231Y\301\273\337\177\236;\205A?\274%\020S=?\313\306<n
\304\315\273y.\020=G\317\260<Fsk<\204\363\301<\010\322\033;\016\341\'<\336BE\275h\222\021\273\204\002\202\274\364f\241\273`\235\007=\234\203\207<C\273\375<c\264\377<z\205\217\274\343O\364\273\320U\024\275w\336\326:\340\304\246\273kXk\272\311/\214\273`\274\031\274\333\377\265;\325\210\377\274IP\004;\347\270\232<s\243\321<E\317\3369\217}\374<\020\177\335\274\312\333\022=\"\346\351\273\256D.\275\205\350\313<h\242t<\204i\363\274\03634<i~\234<,\026\372<%\325\344<v\312\212\272\364\265u\273\3654\241\273\016\022%\275\253^\326<\244$\302<\275\214\306\273\352\376\232\273\273h\310=\207\306\357\274\276\312P=F\355\246<\326\317\363\274\356\353\310<\320\343\372\274\202fQ\275M\\6=j\361\030\275\227\r@<4\252\r\274\317j)\27440\r<s\"\233;p;$:H\314G;e\r\247;r\364[<\320\273w=4\334 \275\367\324\036<\017\022\r\274\242\332\330\274\017|\004=\347P\372<\315~\276\274\255\375B\274\r5\\\275\366 \213=\021k\357\274\225s\211<%\333\217<M$\005\274x\373\334<\316%\262<\355\033s<Z\316c=\212%$\274OF\377\274\341\006\273\274\220\257\001\275\261Bo\275\242q~\274\220G\246\274\262T\214<A\006H\274M\236b\274\354_\343\273\206\243\026=\317\025c\273\255\324\340;\017F\305\273\004\211\243<QV\256\274\362we<h\002\304=\227$F\275\220\177<\275\236H\020=\353\251\307\274j\261p\271\215\023\344;e\224x\275d\r:<o\363\342=v\356\002\274\367\251\275\273_\354\206\275\211\211\317\274U\2455=a\300)=A\235\201=A\305\250\275\033\310\237<\335\323\274\273\376\321\254\274}\002\272<D`\363<\200k\350<\201\337a\273\276\351\215<\365\3171\274\004}3=\003\241\202;\312\322#\275\244\357\253<\252\237-\274\216\346\033;\244#\251\2740(D\2731\346\312<\271\024P\275&\262E<\035N\007\275\337\254\320\272\225\2329\274\206j2\274\261v\351\2723\206\253<e\214\214<Y\034\274\2742W\316<\033\014\267\273\014\034\270\273O\212\">P\274+=\226\376*\275\2009\317<B(;\275\030[U=K\277\353\274\363\341\002\275\276*\247<\332\2519<\017\377\t\276\270\211\235=\225\013\203\274\262\313\222=\202\217\247\274\211.\302\274\034\346\342\274Q\017\001>\234\027\t\275/\031\254<\276\242\003>\230\365F=\263=\033\275\350\234t\274X\316\222\275\007\200%=\211I7\275\262\243\354\274\010\353\314=+\3250\275\203\016\"\274^t\343=\030\001\252<!\362\036\274a\237\303\274Il];\203zu<\217|\327\274\264\\,\274=\274L\273\212A\314<&\207L\273\355\300a\275\375\013\257<\000J\212<\250\342\260\274m\273`<G\377\t\273\036\264\321\273ECg<\231\240P<?{P<Wl\356;\252b\020<;\346t<P\225\034\273\367\261\341;\2508\206<\350t\325\274#\255\373:\227\246\257\273\261f\210\274l\016\332\274\377V\203< \234o\275>\213\037=\221\275\014\275\274\367d\273\214\346\313<b\026M=\200\324`=\362\240%=\201v-\274\036hL<qT\242=\010v\210;\251$\373;\006\213j\275\370\350\351\274}`q\274\026\344\356\274U\017\210\273\332\r\342<\304\323\017\275\366\216q<\350\216|\275\000\234G\274\002?\310\274\321Lb<*\273\205\274\037\256\364;R\234\037=FI\364<\343Lr<\371\313\357\274C\252\210\274\211\022\350<\262\016\017=*f\020;\024\235\033=V\225\266<\233\343 <N\354i;p6\014=O4<\274&\371\010=WH\267\274\022\313\354;\016a0\275\241T\203<1s\313<\202}t90\257T<\367\307\361\274RQ\266\274{\221\014;\350\215`<)\307\023:\326\032\332\274]\321\260;\373\240H\274\256\035\255<\271\223\224;\013\230\214\272\324\035\350:f\356\013\2752\343\024\275\033s\200<H)T\274\014\364<<\237\244\247<\220\3560\274#\377y<\014\0070;\037\352\n\275\227\316s<\226Q\017=\261\3167=\263\271\253\274)\334\220\273\360*]\275\222\177\001=\256O\205:\'C%<\034\320\204;\352\221%\275\232\302\333\273\304\303\n\275\202\037\017\274\244\272\020\275\024\0200\275l\022\032=\233\212\373< 
\033\210<\036\272,=\366I_<\3415\010=xx\032\274\247\004\240\274\202\"2<,\246\266\275\203\220&=\373F\337\274-\026\222\274\253bj\275\006\307q=\351\257\236<\277\246\256\274\023\001\027=\342\217\264=\352\021\326=\031\260\214<\313\2769;\234J\367<^\323H\2753\264z;\245\330\205\275 8\227;*<w=\025K9\275/(\326<\376\"\343<7\237f\275^gc\275\272\360s\275\307\363\326<<\263Q8f\026);\352w4=\241\222\032=Rj\336=\316\273\003\275\234\264\252:\n5\307:\3242\026<<\326\325\274\211\373b;\253^\017<!\357\326\273@\014b\274\346\016\221\273@\300\337;)\334,\274lP\222\275\376\233\216\2746+G=\027\010\277\274h\203\271\272<`\"<*\276\300;\355\t\223\274\253}*\274$l\203=\265\3158=\224\241.\275\240\321\257<\212\313\243=\266\253\251\2732\267#=\323\212\334;m\252\315=\216R\001\275\034\021l\273l\250D\275\212\333\273\271\200\tx<z\366V=nmx<b\2401;[~~\273\024\210\312<\202\nh\274\346\223,=3H\226;\277\312Z<\270\234\240=\273\372\376\275\3108+=T\312C\275\360z\300\274CD\031\275m\225\r=\201\365\320\274\004\324\201\273\"\211\205<\2522\370<\3578\216<[?2<\350[\214\275zz\213<>1}=P\323\306<\316\231<=\341\t\231=y\367\006=\243\026\037<O\274!<\017\360k\275n\235\007\275\036^\210<\000\336<\274K\371\277\273\317\250\376<jl\032=\242\372\252\275\263=$=\321\301\245<\265\345\235\273`D\013\274\230\213\300<\313R\006<wk\325\2743\033#\273\266\273\315;j\253\303<\030\222\223=g\272+\274\367v\243;\224\3631=\364\270\001\275\253\343\030<\357\2545=X\307C\273|t*\275x\233w\274y\235h<Ho|;r+\252\274a\215\010=+\254\263<\265\031G\275\240D&=\n?\224\272\230\\\235<\246\354:\274\322\265+;\314\273\027<\221KA<\354\203\342\273\005B\260=|\371\003=\263\005\\\274\224\311<\275\000\271\230<\317\316H<\026q\023<\307\0052\274\\\302\334\274\310\000\204=\244O\210\274\342\026B\272\323\304\314;\206\001)\275\377\312\0138\206;&\275\314\223B;\344\304/=V\276\202;\003\016\321<\003\2758<t\335\030\275\317\3709=0_O\274\207OS=\303\017\227;.\342?<\231U\311<\233\233\r;3\023\214\273HV\253\273r\001R<46\224:\350\304\332\272\255l\372\272\"\360\302\272\341Di<\010\365\201<#\262Q<\261}\037=%\272\235<\301\017\262\274\335\010\013\274A\2057<M\336\346<0\010*\274\240\240t;\202v\021=\014\351\032\275y\371R<l\006\274<J\344\027\275\260\262\360\274\274\022\2539r\\K\274\'\272\025\275\330\365\276\274\354\236h;!\177~<t\324\215\274\207\276\210<I0\261\274\370\002\025<Pd\320\274L\026p=\030\231^\274\333=\241\271\205k\'=\304\030\306;\343#\007=\323\315\247\274\342\277z\274\345\021\323\273\366\354\374<OH\352\274_\2775=$\343)<\362M`<G\037\232\273\000\321\372\274\367l\243;0\211\324\272i\034\t=/\205H;K\371d=\346\372g=\ty\004\275\314n#\273\317\361\006=\335\030\013<\204\361\314<_\255\252<\342n\230\267\232o\301\274!\371\223\274\271L\262<\314gV\275hFL=\006:K\274\307L\253<te\027\275\367\021\220\273\376\211\320\275n\300\261<\275\365\177;\365\367u<\006\020P=O\227\372\274\2541\276<\203\224l=\273\354\312<(2\312<|\335\372\272\347\025l\275\270QB\274\350Wj\274\362$\251\273\027,\253<\342\210\352\273\264k\243\274\207?\r<\332\277K;!\270\234<\342ez<\277\300\273\274\217\014\367<-\201J9\341\265\014\275b\222;<\252^\202\272\021\335\242\274\300\250r=\203\264p<\276\r\005\2757\265\212<m\307\370\2746+\234=*\265\237;\024\341l\274\017s,=\213\330X\274R\254\033\274sa_=2\032\006\275\357\271\020\274.\237\004\274\032\257y;\210\006X\273\337V\013=\366|\323\274sQ\221\274\025\177\246\272n\231\\=C\202\311<\345|\215\273q\002\235<o\177A\274\"b\361\274>/\270\274\334\032G<\271\246^\274\377w4\274R\235\r<}u\235\272z\272\332<A\305\2149\251\021\010\275\337\210\026\273SQ\036;\026CT<\363k\305\273\032ge\275L\212\024<\227g\376<0e\213\273\312\203/\274\017\302\003\275\235\255q\275\0
13\355\256<0\023\036\275\216\322~\274\2614\331\2748eQ<9g\263\274\212x\0077\022\273)<\252\177\301\272\032\200\n=\'\242N<:\366A\275\260\245\034<\366\036\352\274]\267\016=k\212\377\274\377/\002\274\325\316\205\274\233\006\305\274\330\326\037=rS\031=\222\031\237<r\223\200<\325\274b;\332\260\000\2758\030\223\274\206n\177\274\212F\003\275\035i\006<\214di\274\213\225\321<O\344\026\275\250\202\007=#0\020\275W\233V\274y\273\304<>\356\200\274\2174\327\274?\222\252<\322L\226\274\255\245\357<\354\370\001==\314\221<\210d\221\273\362\335\301:\275\"D<c\203\307<\331\345\007=\306\r\035\275\201\267e;\024\013&\275\325\007\354;OC\307<\212\271\242;Tr\262<0\357\342<6_\007\275\3372\211;tl\373\272\326\237\305\2743\234\023\274\234t\031\274\253T\027\274\271\273\034\275\367$\023\274Pud\275\032HH\275\222O\235\2758E\301<\367m\247\274\273\033\231\275\310(\016\275DH\211<\177\362\200<!\223\220<@^\021=0\242\374\2746\225\'\274\203\352\002=6\304\304\274\217\367\017\271\254\326>\275\021\274K=\320`\254\273\206\034\206\274\322\227\367\274\363\304\372;\003\270S=\n\312\330\273\023\212\201\274\376\252`=0\373\026=%\215Y<!`\344:M\226z:\361Q\312=\301)\231\274W\327M\274\201D\272\273$\316\361<\250\323\321\274\327k\334\273\234xA=\016\177\337<^\010\327\274D\215V<\313\203\251\273z^\177\274\236\364\002\274\216\374&\274\207\235l\274\2330\273\274\350P\306\273t\250*=\316\023\261<\264UE;\004\277\200\274\210\224\372;2\311\\\272\226\305\002<y<@\275?6\002<\216\303\306\274G\t{;\3565%\274H\351A\274\3519\005<\\EI\275X\362\207<\277\002\273\274\005)B\274\322;\237<\1776\326;\206\244d=\351{F=X\2631<Cb\\:Q\227==\t\354\t=\260\302\035\275mR\306\274KxY=H\000\307\274\3463\317\274{KZ\274\333p(\272\217\316\261\273\213\0222;\370\224\375;\316j\206<\233O\333\272\321~\023< A\345<\275\212\035\275\223y}\275\312{x\272\3713\n<\315h\344<2D*;G\376\360<\255\262\200\274\027^\022\273^\214\036=\007\236u\275\032\243*\275\230*\253\274m\014\253\274\303\267\244\275\373]\244<\010MW\274\272h\023\274\252x\014;\204\010\267<b\321\204=+e\300<\237]\242\274\340S\365\272\"J\263\273\232\r\312\272\213\260\325\274\343S\236\2725\t\242<\345~\267\274@\326\017=&Q\210<\234\350\236\275\356c\017\275\212\261\3268\276\240\004\274\306{\311<}\306I=\325JI=\327;.\274Ub\r=\205*\271\273\373\242\033<)\026\021\274B\267\"\275\024\346/<[\037#=nh\254\274Z3v\271\202\304g;\016\317J;0\022^\273\316\221<=\343_0\274\031\306\342<\367\312\t\275R\317\001=\205T_=\r\211\211\274\017\2042\274\201\260\324:\207\013\014\274k+\224=::\270\274\253\240V\273\252\212\234;kp.=\0008\020=^\016\335\274\2062\363;B\301\004\275\035\304\337<]\346\030\275\353~\216\275 Pl\275Y\346\016\275\0162p\274*8\023<\'\227\367\274~Fj=G\247s\274\373\021\314\273\215\270\332<\247x@\274%3L=\007\006\347\274X\364)\274\337F/<\023m\"\274/L\257\274\325\r\377;\211\006\007\274\301\334\020=\300\225\017\275]\2778=\371L\212\274M\266\363\273;G\246\274 >]\274\007@\205\274t|(<\210\336\315;\361\345g<xg\206\274B\323\207<\240\316N<\335\026\252<$\302\204;\005\021\326\274\007\253\264:\376\255i<\361~W<U\357T=!\346\332\274\325\321\263\273\021\352\016\275\341\353\334\274W\235\202\273\000\252\234<\245G\254\273\247#\037;\002<\211\273Es^\275@\376d<\356-.\274\213XR=\024\303\252<0\224\221\274\230\344&\274g\336\225<\275=\250\273De%=\217\347;\275i\310\301\274mh#\272T\nS<\353m\336\274c\027\211\274\223lH\275\232_\032<p\356\211\274\016_\200\274/\350\002=?\324\t\275\331\204\n;/%>\275\220XH\274\355%2<\217\312b=\025\266\267\274\345\323\211\275\313\231\033:\327 
\004:\376C\204<\204\253\273\274\017\245\326\274\362\2053\272d\013\273\2737\337\234\274\320L\356\274\266\245\n\273\034\307\354\274O.\333;M\344\224\272\316\'\313<XVv\273\017\2704\275\014\236j=@P\203\273\000\374\363\274\205\301\217=Vl\230<I\236!\275\334\202\332;\212\375\024;ml\230;hG\251<8\247 \275\264\205@<\261\367!<\355\014\271\275\344\032p=\214\000b<\221{\312;F-\214<+&=\273(\036\265\274\271\317\204\274u\000\237\274{\023i=v\004\201=\005\255\374\272\202\341\002<\025&\214\273\254\342(\275\\M*=\213\222U\274-\t\342\274\024\243\023=Ygh<\264\\\344\274pW\305\273\200\002z\273\253\0323<D\3771\275K\327\t\275\367\035q\273\342\225\344\273r\260\321\274\321c\013\275x{\272\273<M\332<B\304\020;5V\236\275={+\275\226\343\000=\376\031,\275\312\362?\274y\200\001\273\230X#;j\t!<m\200\026\273\303=\r=h>J=\224 x\275\232\205J=\262\222\274=\343\257\317\274\255\222\\=\024)\345;&\037\021>\t\332i\275\214?N\274Tq\020\275\361\316^\274\216\177\265\274g\226\023<\222\353\336\274\tg\260<\307\240\305<w\325\375;\307\226\014=2\232\235<\2702\335:9\024\252<V/\230;\373\032\212;\333\250B\275O\215r\274\302\021\233\274\3248\213;%I\025=d\310\352;\223\222 =\353\364\022=D\300-\273\310\t\030\275\302\357\r\275\3433\007\274Wn2<\305\272\227\2740hL<\025\270\204\274\204TZ<o\177\303;\314\343\t=\265\230\220<X\253\226<\231\006@<\337\364\025<\236V\375\274\'\210G;\332:\356<3v.\275\333]a<\205\243\t<\255\327\367\274\323\037\335;0\341\375<\223\275\216<\273\250\222<\037\235\324;\242?\274;\367\233.\274\177gT\275\272\332\245<\271\207\354<\033\342\240;r)\206\274\344\336\021=;\273\026\275\302\'\221<\310\037\207<\024\007[\273\203\344\003=\327G\356\274\307j\010\275X\002\306<\002\\8\273n\306o\274\277\033\342\273nB\250\274\300\034j<\261\2222\274\005>\251<Ux6\274m\205\313\273\271\375\322\273\353\360_=\364\323\315\274k)\034\273D\304\002:\022\234\004\275\n\365\375<\205\004\r=\030\351\373;\370\305\224\272\002\036s\275\240\277\214=b\014\320\274\355\332\225\274\244,9\274\310\255k\274\313q\320<Oz\263<\326\\\361<d\274\240=W<\220\2748n\344\274ZN\242\272\236\255\312\2741Jn\275+\252\346\274\3475O=zOu<\244\335e\274\274\276\003\275\n\275\014\274l?\021=@\251\'\275\0343\033=\r\356\250\273\207\234\260<\367\333\323\274\235wU<\237|\003=\203\017\037\275\017\031F\274\317\300\344\273\313\370\010\275\257N^<\032S\360\273\213\317~\275i\371\325<tQ\260=\0347t\274\032\325\210;\203x<\275\014\212\225\2757\025!=wx\320<\007\023<<\335\327\216\275R+\265;\247\246\205\274\331\361=\274@j\272<o\346 =\002\353M\274\363L\346\274\004[\026<\311?\272\274\350\254\334<\312h\017\274\370\tF\275\231\265\021=\344\263\036\274A\202f<\253\"\030\274lm\370:Pu\252<\2226\211\275\203q\374<n\276/\275o\357L\273\2044$\274\244\352\232\274\254*\030\274]\215;<\240\300\305<\016\214\235\274\3341\221\274z$]\274\376\302u<c\277\325=v[\304;\371F>\275\215v\010\275\322\032T\275\037\371\334<\376&\341<\313\241\225\274GwP\274\276\361\205\274%&\303\275\302\373R=\344\177\241\274tj\025=K\031H<\013-\261\274\204\324\250\274+\037m<\307\264\276\274\031\"\216:\271\244\002>\016\344\r<\323<\234;\266\r\247;\260\177V\275\237\032\252=\320w\004\275Z8s\273J\203\263=\001\377+;\037\352!\275z\312#=O\005P;\264\250\262\272\353\3334\274\361{\350:)\241\026\274\270\'\322\274\022\366\244<\242h\031\272\020\000\337<d\3372\274\253\313+\275\227\206\261<r\353M<17\307\274C\014A;I\356*\274\037n\305\273\217\275\237;\334\365\353<\250]\272<\000\nz\273cvD<uz\215;\203\3642<\033T=<\235\n\377;(C\017\275\037\027=\273\306\371\235\274m\223\205;\331\304\001\275\307\204\354<\177\254w\275\312\226L\274Y35\275\37330;.\263\215\274\211\314g=\277\016\214< 
\377\025=k\032\023<S\005\370\273\272\234g=\'|\200:\331\224\007\274M\270d\275T(\344\274\205\r\027\275k\372\260\273-\370T\274\212P\010=\375\371\004\275\304Cf<\205\344_\275\266\211/\275\363P\212\274\262\263_<\352_\036\274\3612\032=\007\r\324<\017\307\314;\377F\221\272\326\245\004\275\3407X\274\343\351\224<\027\251\232<0\223\226<\375\251\350<\004.\226;[\354\244<\177\004d=I\333*<_i\306\274\306M%<\026$0\274\202g\276<\320\031-\275*\217\333\2710\005z<\225\"3\274\332\021c<6\266\260\274\365\020\273\272\\\\G<y\305f<\2569\273\273u\324\367\274\220:|;\025\006v\274C2\261<S\320\000\275\360\336\362;\016\261\201=\312\316\030\275-O%\275\336\302\016=\257\206^\274\320p*\274Wb\203\273\213\220 \275\323\2363<\2460<<\241\275\260\274\360\223\262\274\020\rs<\354l\324;\003\310\244\274C\006)<\2629\036\275\007Fq<o\245\n<\327\356T<\375\223\\<\260\252\230\274\345_\231\2748*\346\274\262&\374<\310\273\374\274n\002\227\274\346\236\261<\355*\005=\221\007\263;\236z\252<\205\226S<\034ij=\253\313?\275\277\364\311\274\316\324/<4\311e\275\030\201\264;\340\354\264\274\023%\226\274?\235v\275\226\3675=\346v$\275\310\317I\275L\354\363<\032x\212=|\370F=\312\0379<\242\312\330\274\267,D=\304d\356\274\010w\224\272q\024\r\275\376$\262\2723\315H=\204q\005\275\323\3002=\324\362\247\274\325\036\233\275c\1770\275\317\336\274\274C\213\245\273;I\2367%k==\232\377\'=\216\356\212\274\245\255\201=\206z\200\274\2735\231\273\236\266\252:m1W\273A\214\020\2754\000\355\273\2250\336\272r-\210\274Lj\222\274s\342[\274~\r\267;\377\tO\274~L\'\275\264\344y:\231 /=\3238\302\274\332\304\275;6k\003<4\244\333;\2217\010\275\270\245\024<n\300#=\\M\002=\337\000\032\275\006i-=B\310\260=\303\256\216\274\270\r\327<!~J\272Fd\217=MZ\321\274X\016\261;\215\2533\275;\3731\275`\376)=\377\242\214<~\317E\275D\007I;5\240\273\274\270\356\371\274\010\014\227\275\345\230\231;\225E\n\274]\265\204:\275\236\026=` \331\275s\233\036=O\326\303\274_m\206<\260\272\'\274\261\n\211<\232~\355\274\214\273\022<\367\017\035=\024s\217\272\364b\030<\007\233\027<^-3\275\332k\244;\273\322\033<\243\223D=\340\253\002=\250\002\021<\024\322\211<<L,<\356\001\014:\tc\216\275=\275A\275X\342\225<Y\235=\275E\031\034\273\017\257I=\224R\\<\360\235\201\275\266NT\275\035\252T=\201^\371\2743\335\231\273)\372\203<5\2304\274\227\325J\275\3209(\274\036\255L<\230\347O<\315\351;=S\2227< 
\206\256\274.\202+=\335s\n\275\323\225d\274R\210\225=pmv\272\020\355U\274|x\344\274\220\207\254<sI\026\274\266\007\216\275\031\357D<\251\304\206\272o\362z\275\244\227\301;z6^\275\321S\312<\310\234\360<\230\034\270<\314g\253<k\257\371\273z\036\264\2741|n=\365\345\240<\362\207\327\274\242\345\000\275S_\202<C_\226<\344\223$\275t\330\262;O\303\202\274\277o@=\277hK<C\222\327\273\370\206\023=F\307i\275\242\223\024<\215*\034<\n\000\374\273\316\000\211=\ng\253;\375^A\274\251\"\240\272c\2313\27505\225<\230\032\235\274\300p\264\273\253[Y\274\227\225[;\357\332\023=<\253\256\274\265H\251\272\222\230\021<\325TR;\323\032u<\376F\342<\331\220\321\274%\235\235\274\031l\005=\225\036\214<\361\277\330<\215\272\245:\264\223\347:\003G\013\274\313\005$\273\326\354T<\216\356\353\274D\306\t8]\217\235\274\311\245(=\315\023}\274\022/\333\273|v\262<>\367\316\274tJU\2759\372\\\274\327\311\024\275\233\364T\275\226\005\017\274\027\330\356\274$*\226<\210D\256:\315\331\303<\276\225?\273\314[w\274KS\365\274h\037\004=\313\202\323\273\300J\006\274\233\"\304<\363{\302\274r\312\266<\277\252\306\274\345I\2139\027g\246;\036\224\006=Y\347\203\274Nc\233<\260\246\235<\314\007\001\275{5\305;v}e;n\307K\274N\nr\274\334e\352<\233\224(\2751y\345<\224m\312<\252\004L\273\203E\203<\247\371\303=\273\302\034=In\001=]\335;\274q\347\274<\3262\315\274\272q\374\274\270\014n=n\211*\274\\\234\316<\032m\270\273\220\005\261<\365%5\275\005u\3007\303\345(\275\245\037\035=\322\241\202\274\374\234\255<h\204P=t\335\377\273:g\014<\010\025\212<\177g\002=n\316\217<\2157\013\275B\217?\275\320Gi<M\000`\275\234\n\233<|\254E;O\323l\273.Cq\274\312\221\271<Da!\274uH\310<\337\241\037=\202}\207\273n*\224<\312\203*\274/\231\311\272\303\225\255\272p\365\000<\304\240\326\274\\\275i=\314\352\013\274\267\023\373\274T\206\253\2732\226\'\274(>\224=\360z\036<\246\321\304;\35098=\036\233\241\273\275\024$\274\313\3245=\001\264#\2758-\377\273\014\223\244\274{\247\313<N`\274\274>l\253<\025X\355;O\304\241\274\210\\\000=\221/\200\275\225\352h<\336\375\252<?]\220\274\333\007\344\274\245\250\362\274\372w\331<d\365\314<\233Dh\274D,\265=\237\340@<?\035\260<\371e\361<$>O\272Q]\233\273\232^==.\021\316\274\'\246R\275;\365O8\325q\030\274\325\rN=\366\324\235<L5><0\273\327;\001\332\232\274\2372\007=\373\010\005\275U\007`\274\344\307P<\026@\243;VG;\273\240\\\024\275\037\262&\275\032\006\246\273LH\350<\363\374(\275\317E3\275K\035\264\274\350p\236<e\221\322<\245\343V;\3234]=\322\024a\275\222\315\'<-\020\017\275y0d=\246K\251\274\347\243r\274\224l\367;#\373\240\274.q{\273SG\233<\352\016A\275\270\235\325<\361\310X=\327\242@\275PCS=\2317\322;\312\2776<\32441<\265w^\275(\246\224=:.@=\274e\376<\027\316\215\275\330\343\226=o&,\273\204\203\"\275K\241k<\211g;\274\276#\302;\336\252\237<\3655);\321>9=JX\027\274\233}\211<\252\'A\275\003\337\034=\027\222\220\274\310\307\375<\356\371\001=\311\242\031\275\027*)\275A\373\332\272c\203b=e\315;\275\001 u;\254-0\275B\020\210\274\317>\250;\232\025z\274\266Q\312<\242\275\310;\\\310=;\034\220*= 7L<\230a\007=i)\310\274P\267m\275\212\275\306\273e\234\336<v=\374<\222\343 \275D\253\275\275\246;\017\275\nf\013\274PF\236\275\334#\200\275\232\356\340<\236D\t<N\366\362\274 
\255\300\274\025\r\266:\274a\"\275?l\022<\204\3356\2739\266\025=\246n\250=\205\232\370;$\033&\275\255\013\221=\302=\026\275\251SJ<\342\024\262\273\211\251\272\274!\t\216;.\rl\275\224\313\230<9\321y\275\"!\003=J~\210\273\271X\202\275,\353\003\275\314\374U<\253~\341\274\244\t\002=\313\335\345<\326\237\234<\"\003\231<?\304\211=F\272\210<\024\003\365\273\021\240\361\274&\037\313;\343\005\366\274\232\275C\275<]\033\274\347\354\276\273\375\023\007;i\321\251\274O\212Z=\235\246z\274\354=\264;`]g\2744\225K<\265\347\322\274\244\372\324\273\333H7<\206*\\<\200\314q\275\327\301\317\274\243\3779\2753d!=\274E6\275\311W\247\274n~K\2747ci\274\345\t\204\273oT =\341\001Q<\373|\314<\353\000\014<\235\373[\273\256\227\206<3\345\264:\227\323\250<i\265\310\273z\3552\275\211\370$;y\n\016=\216\350\320<\312@\231\274E\004\261<~~\201=\244\030\234\273\230\254(\274\\&\337<vA\250\274n\215e\274\241\213\006\275E\362\200=T\367\303<\302\212\242\274\243\207\232\273x3\'\275\025hS=\317-\227\272\327j\034\275_f\030\275\211O:=\0039e<\t-/<\n\025\005\2752\2647\273\243\245\330\274\200\342\362\274g*\204\274\241%\214<\231\210d\274\214G\317\274\350\301\\=\272G\004=\372\0340\274o\233,\275\023\214\277=\213\026\271\274\263\364\2149.6@\275\3441\256=\207\311\"\275\t+\255=<\315\001=D)\304<\226 \214=\273R\340\275\266\343\360<5\025~\274\202\337r\274dO\303;k\027&=\3539\204\275\226\374\r<B(\254;\372J4\275\261\022\266;\2327M=\244s4</\252\305\274\3576\216\275\300z\221\274\340\000\332\271j\265\271\275\223r\215<\322}<\275H.E;\200S\207<\016n\217<\250\236\207;\017M\336\2747\030\013=\207\255\344;\201\"\333\273\035\326\313\274\231\\\231\274\354}R=\305\206\022<%\336\215\273\364E\223\273\223*\324\273G\312\230<wP\225\274\354\243\202\274V\036\306<C;>\274s\327\253<\304\300\001=\307`?\273\357\364\234;S\207\311\273\377z\304;\261g8<\302\264N:\236\303\352\274\371v\311\274x\271\200<\254\303\003=n~5\275\213R\007\274\347\223u<\351\241(\273\255\241\201<F\350\346\274U\004D=\215#2;\343nc<\354\025e:P\302\332;\002A\216=!>\230\274\271\326\220<\362\016\320\274x8M\275YDj\274\n\205>\274\020#w=;\271\004\275\241\216c\273Y\016\001;M\034\221<\312\220\357\274\362B\036\275\216\016\276\274\234\n\024\274e4\205<\226D3=\365\336\t\275\3030\037\2758wa\274\346RR=\215W\324<\333\022=\275?\324\003\275[f\353\274j\244\027\273\205a\333\274\2130\027<\220@\001\274\323\036I\275\267!\254\274\210\'\233=+\001\t;T\334$=1\266\222\274(\022\265;\271\375\365\273\t\274\301\274\345v\333;u\220s=8L\005=\351\2567\275\362\334\322;=\350[=X\333\374<\257\305`\275\313\242e\274[U 
\274Q\236\301\274\031\364\246\275U\243A=~D\003\274\375\030\225\274\031q/;\017[W=\3341\227<\361s\234\274\263&:\274\324\364?=\267AX=\370Q\t=SKp<\270\216J9\2310\372<\'\312\251\275T\306\247\274}\013\025\275\244\237\277\274j\354v\275\345{\336;\267\356\003\275{:\035=E{\004\275EK\007\2734\367\213<y\333\342\274\361\352S\275\204\\\374\274<\200\215\273QI\026=\tC;=\350\035\'\275E\013u=\353\n\016\275t\222\201<\257!z\274\\\200\376\273:C\305\274\030:\302\274*\005\201<\224\030\214\275\223\001\270=\031\221\251\274D\306\005=\335t\024=\r\026\004\275\3454\326=\024\305\260;+\232\032\275\264\212\262\273\312\234\034<_)\031\274\n\241-\275#\001^\275u\010\325\275\237\220c=g\273\221\274H\253d\275l\351\010\275\276f\250\272r\265\364\273~\035\244<\326\310\244\274\377\326\201\274\236\354\265=\333\233]\2752\300\314=r\254\224=\007\010\317<!\251|\275F\215T;\020\302\313\274`\310\243<#\244\035\275\031\302\267\274\377h\021<\256\037\014<c\215\217\274\366)\255\274\272\334\354;\313\270\242<P\006\315\273\306\324\316\274.\210i<O3\030=\252\302\211:\031\230\325<\365\374g\274J\037\026\275<\363\352;!y\027\274eE\371<PR>\274xi\351<-\204\267<\360+L\274\223\256\264:Or\307<\177L\347\271A\0058\273\275\265\316<{=Z;\3578\260;,H(<\222\365r=T\357z\275\357\244:<\343\325b<9\310`\274\006;F\274\256\210\201\274C9\232<]\003\027=\310\036\204\2759\026\003\275\304@\372;\237\226\020\275\345\236<<\357[\277\2748\362;<tR\335<d\367\002=\272\266\232\275\343\216\243\273~\217\307\274\325\221\332:\230P\225<\005\001\312\274\312\262*\275h\242\250\274|\336\314=\362\222\307\274\244E\202=f\331\267\274\321Z\255\2747\250\003<\326kU\275\231O;=\260\026\214=\317\344\203\275\2537\'=\266\317\215<\220\3430=\305%\327\274\3772\341<)C\365\2746\310\211\274\336\325v\273~\323\220\274\330X\024\274b\351I\274\311\033\224=#\340\273\274x\254\330\274x,\016\274b\277\213<\3713\370\274|c\022\275\340\257\374\274\345\355\371\274\266\351\255;<\021\277<\235K\013<\343r\232;\374\010\304<\241\365=\274\221\3465\274\322A?\275\360+\344<\2371\025\275\232\005==^:\300\274\225\224m;\337\326J=x\337H\275\336\260b=\t,\355\273\375\246\024\271\250\030\277\274d\226L\274\376o\205<\303\303\006\274/\362\233<%\003j=\036d\242<\307\363\005<\216[`\275\237\2520\274*\315\016\275\220\006\254=?9\367\274\370O\257\275!\247v<\271j\006\274\323=4=\003\343\315<g\205s<\014\300\":\310gS\275\034\257 
[Octal-escaped binary payload omitted: serialized tensor data (model weight values) embedded in the added file; not human-readable.]
\322\313=\314\362\032\274\304\014\201=8\333\023=\364\242\313\273\270\025\275:3~\352\273\360*e<\013\312\222<\335\035\375\273\"\207\307\273\365I\313<\263R\265<\013\004\325\274\031\326v<8\250\000\274\316\321\025\274\362\264\226\271\236\3235\274GI<\273\370\235E\275\316\271\314\274\225\221\344\274]\330\306<\264\244\250\274,\027\001\275\010\240\310<@\354\022<`\237];<\230\220<_\333\202\271}\216\307\273\352\006\006={L\003=\307\220==\2113+<\033\232\036\275\353\254\306\274\321\366\230\275\376T+\275\036V\177\274\n(1<\032\303\'\275QfS\275\302\207\217\275\326~\222\2749\322\325<X\r\223\274\304^\326\274\320\2106<4\034\240=F\246\245\274\204\322\336<\216\036\001\275$\220C=\370y\016\275\202x\330<\317\341O\275\272\354\240\273\220p\213\274\232\233\326\274\026\177\240\275\346\032\204\275\310\207\215=\2153\020=\236M\021<\372\0147\272e\007\252\274\333\334\301\274u\010\202<\037+\016;\325\256\332\273\324\332\236\274\004Q\224;\341\342\030<\235\024f\274,25;\320\261S<\207n\t\275\354c\215<L0\336\274?\363_\274NW\036\274\n\366>;D\315\223<\361\246\307\274(\335\2069\343x\227<\007\367\306<\352\237\032<\\K\211\274M\021h:&]\322<\307\263e;\247\n\352<\217\341/\275\237\027\310\274\370\237f;\347k\220=C\026\213<\217\301\246<P8$\273\263o\374:\005\221R\274\036T\321;)l\002\275+h\006\275\370\321Y\273\275\244\224\274\305!%=\006\212e\275\302\372\024\273\037o\313\274Z\362\210<Nc\257\274\264\346w<\204\024\252\275\032\267\275\274\311Fp<\000&\375<\275\221\032\275\217m\027\275\242\031\215<\016\036\264\274^\210\352<b\362\352\273\030\342\\\274W\372@\275\347\376\007=\344x\367\273\n\240\004\274\267#,\275rZ\271\274\274\372\335\274\271i\310<\366}l\275\263b\372\273e\003\031\274 \321(\2755\031\267<\222F)9\r\200F<C7\301<\255\205\210\272\364\002\324\274&\016\331<\332\350\346\273\311\3620\274\233$*<\377\350%\275\003@\321\274\004\260\021=B\261\022=D\\\\\272\201\250\031=\241ol;\177\233M<h\240\227\274\006A\311\2743#\243\274\245\333:;lD\303\273%;}\274\353\304\252<\214\207\034=(\3058\274\324\316\254\274K\263\274\274\0131;<\270\302+\274[\017\251\274\036(\253\274\307cH\275\304\360\026\274\037<\216\273jH\372\272\365\367\261<\373\250\210<~\032\207\274\023{\306\273\244x\n\275\361\237(\275D\307\253\273\003\330\355<\351\246\330<6\3042\274\240v\244:\207\270\3409c\177<=\203\257\321\274\343\024\232\273\357p/\274\014\243\273\274\307\022\300\274\232\367\331=\231H\034\274v^\235<5m9\275\275[_\272\271m\334<\343?\207<\220?\364\273IT\034\275\000\t\247=\026.\017\2744\234\3449}\177\220;uK!<\364\353\242\275>\313\000\274\255F\200\274\361\346\266;\353\305W\275\032\231\262\274\265y~=\333\350\327<\032\316\275\274\366\006\232\275\001\251\232=\235\200r\274\256\207\235\274Wi\315\274 \227O<\341v\036<\251\335\212<\356N\004\274\261\271\023;\225\347\250\274]\332\236=\243\306+=8\007\312\275\331\035\274<\374\376\212<\237\235=\274\2207\250<\260\323\027\273ba\362\275gI\256<\005\312\026\275\335\263N\275W\204\247\273\367F\336<\254\2770=t6\246\273crV\275\220\226\317\274\326Mk=\010\001\234<\0022S\274\377\335Z\274Z\2255=-\031\021;\212\201\255<\245\247\346\275\315\245\231<\263\365\223<\005\362;\273\305\034\013=\r!\322<\024\r\345=\367\325\242=t\311.\273\251\232s\275\030.%\274j\270\357=\346\0263=c\024R=\273>\370;\027\363\367<\254\017@\275\241\034\324;\010\347\363\274\333s\227<Fd+<\361\312d\275\223\010\373\274\233I\213\275\352:6\275{\214\222\274\213\004\027=\336e\010;\tI\227\275\013\317\370:\036\363?=\225c@=n\333\221\275v\025~=\323\344\311\274\207\356\305;(\263\303\272\340\343\023\275\265\021\004=<@\252\275\001\370<\275\003A\265\272\265\241\347\274 
nM;{\267C\275Q\307\021\275\343?U<\022Il9l\205\362:<\311\3229IEW=\235\320\335\273+V:=\002\213\251\273\321V\222=\237e\327\274j\214\372\274l\027\262<\202\340\206<\215]\201\275<]\355<\220\333\332<\2546\026\275\324\216D\275\270\022\021=\352\265\016\2752\256\302=\341+$<f \004<|\262b;\212\314\371\2725\303V=\025\307,=\325`-\275W\355\234<\247\rl\275\261S\017\275\024\351o\274\237\232\361<\023\273\232<U\374\207=j7`\275v\370\362\274\231\"B;cB?\273H\363\246<\372\262\247\274\017\0357\274\321;b\275`ze\274\356y4<\203m\262<\273\272\202\275 \364X;ip\245\275*\033\024=2\340\321\275y\303\223<=y\256<P\370R\273\250v\212<\330}\r=\323\367*=~\343\352<C\303\362<7#3<\330\331\314\274\351|\355;\263\371C<\324\314_\275$\352\210\274p\364\231;$Y\233<\233B5\274\037\344\332<\2039Q<\333Gn\273|\320!\275\316\275\306<\375\'\035<l\322\337\274\367\030\343\274\025\352\214\274\232\305<=\035\214\276\273\250o*=A;-<\002~\264\274X\246`<\200\334\243\274\316x\205\274B\031j\274\r\316\306\274\307\362\334\273\243O\212\274\036X\021=i\321\270<\313\236\260\274\230\005D<\346Oc\275,1{\274\217$\324;c%J\274d\217\362<\177\344\274;\310\225y\274\033I\345<\0205/\271\3749\263<\210\201\024=\336)\244\274\234BA<&\243\340\270\223\022A\275\367\261\025=\263\236n\274z\016\300\274U\2730\274\000S\271\274\304\0102<\324\360\344;pp5<h2\023=7\3538\274\235\r+=\005\227\247\274\035X\245\274\202|o\273\270\264\325\274g\010\267\274\302\323\035=\200]C<\304\213\203=&2\311;\246L\003=LV\256\274L\354\265<\363w\023\273\223\252\374\274&l\311;\237\200]\2746\022\314;\352\244\013\274\362\367\352<R#6\274\274\255\210\275\272\211\235\275j\346\221\274\250^\"=\225J(\274\264\020\373<\372.&=\345$p=\371\3052\275:\303 =\256\246[\275\344}\240<JC#=c\245\360<\377\"\020\275\376\023\004=F\232\246\274\344\331\366<\201\010\251\272]15=\236\216\223<@D:9\261\204Q\274\301X\025\275n\232>;B\316\336\271\022l\213\274\315\314\306;\327\264\271<\313\367Q;\342P\355<\013oG\274@<\024\275\000\212\014<{\267\375;R\236\026\274\251\273\"\275\314\321\007<\242s\235:\342.);:\376\026<.yR\274\241\310\024\274q\224\237\274\336!\007\274\3274\202<b\234\034\275\341<4\275]\212Y<I`\001\275\304\362\304\274\177\365\002=x\243\000\275U\317X<\226q\337\274\244-#\273\010V\002<\243\376[;\276\222\222\273\035\243\232;\376\335\007:\223\333A=\375\032\321\274\325V\365\273\313Ie\274\254\003\037<y\253\302<\220-\226==\'\003\273l\367\305;!\202\177\275\003\017\273<+\226\212\275[\326b;\343\n\n;\3662{;\224L\227\273\001b\227\274\312\271\342\274\250x\270\274\244\202y<Xzl<\017\277\236\274u\312\3229\323\026p\274\237\205\360<~\333\342<\346\240G\274JN\364\272b\204\306<\306iA\274\027\254?=N\277\277<,$5\274Y9Y\2743\273+<\003\310\344:,\351\021=\020\013\023\275N$\316\274\361\001-=\232W\355<\031\335\036<\215\352\334;w\316D\273\236\232\315\274\234\352\275\274\350s\253<\340\300\347;(\364$<#\227\205\275\342eX\274\006\r\277<\014B\010\275\323^?<\360\354);ou\247\273\250\212\273\274\207\312\275<\347\311\025<\346\216\253\273\317w\022=4\3610\275\004{\027=\216\265\n\275\260F\225<\251\262O\2717\331&\274C\'\005\275F\263;=\377\240\\=\211R\207\2744\312\022<\366\314\010\275\210\326\022\275g\215\254\274m\327l\274\250\305\022<\363\030\205<\"Z\314\274\010\034A\274f\222\327\274h/p=\372U\213=\215\221\306\275\276O3<\270\320v\274\303\022\360\274\372H~\2751\273p\273\034$\321\275\"\352\250<\\\323\357\274.0k\275q\352-=\361\243\365<\356\356\224\274[\366\221\273\\!\245\275\201\327\032\275\307\226\247\274x\320\020=>\016\004\274}S\276\275j\305\275<\3029\242\274:\'\212=L\314[\275\314\226k\273\353\256\211=\361D)\274\300u\235\274V\323\366<\351\343l\273\306\211*\275\2234\274\272\300_+\274\35
2<\342\274\372u\013\275L2M=\217\206\223\274\333+C\273\3704F<X\315 \275\362\366\241<<X\020=R\225f\275\364e\2028\360\354\215\273\33590\274t\237\273\274TR\370;\031\323\234\274\211\333*<S\001\203\274W]W<s&G<\363\245\325<\007\244e\274\223\016\321<#\023\347\274\317\251\020\275C\370\361\274\001hO\273\301\355\340<`\234\231\274\314{\032\274\306 \321<\036\237{<8\225\000\274\324\017\267<\241x\202\274CU&\275\"a\236<\230\254.\274\217\347\350;!\207\202<\220!\327<\345\322\027\275F\037\235\274ASi\275\036X:<\353p\243<\000\203\317\273\320\375\351\274\372\212*;m\005\372<U/\005<\177\274\024=\232\037\274=\317\255\030<R\274\301\274n\304\273\273{\r\202\275\022m\260;\215\332#\275\206\\\222=\257*\210\274\010N5\275\224\255\033\274a\322\272\274\265\332\223=\2217\356<\321\300K;BX.\275<\252\344=_\302n\274\027\223\355\274\227\263\302\273h=>\274\317\247\205\275\341\212\346\273\205\236\326\274\270\360A:y\346\361\274\020\326\362\274\360\264\362=\017\210\263\274A\213+\275qMH\275o\237\365<\315f\262;s\361\202;;&\034=C9\3637?\311\002<w\304~<\327\265\005=\032\322\363<\360ZR<\353\362\241\275\276K\243;\201\245\3719\326\217<<\274U\365<\r\261\232=\305%]\275t\365r<\312\201\235\271\322\302\271\274\224T\303\274UuD=vv\030\275\377P\207<J\230#\274\247\340\375\274(u6\275\227$\362<n\210\277;A7\265\273\265\032%=\250\343\356<\r\025\033;\301)]=m\244f\274N\026\204<w\222\020\275V\267{\275\275-\235;\300\254f\274\242x\345<\022\240\260\274\000\333\235\273y^\001\275;y\022=\372\301\233<\312\233\325<\023\306T<\231i\002\275\224\210)=\347+\225\274\002[5<\311\216w<\006\247\200\273\270\365\377\274\364 \322<_\256V\274_O\210;\201R>\275\243f\225;\'\374\010<\203n\020=)\300\365\274\013\320\301\274\263*y<\350\263\227<V\314U<\333\t\260\273\261\352\230\274#\372\025<a\222\274\273\355g\237\272\274i\017\275\007\226\254<\246\277~\272Fe\334;F\254/\275\005\251\276\273\230Xd\274\342p@\274\243\365\227\272\r\014\246\275\257?j\273\215\325\240\274\2672\334<\317\207N<\372@)\274Z\263\266\273\331\323\036=\004\010T\274\207\320$=\250\313?<1\262\302\274\377s\272\273\305\367{<:\263a\274JFQ\275\024sh\274\'\036+\275\312@1\273\337c\240\275$\257\013\2749\263\r<\207\006~\275\340\010\010=\322\302\231<]\006\203\275\306\3768\274\215\034\202;kR\034<\207\027\256\272j\306.=\222\353\022\273\004\337\232=s\227W\274\220`\025\275\032wl\274\034\014\264<\234l\352\274E2\334\273\tP\216\274\014g\326:\267\241w\274\313(*<\217\301\241\274\240\\\253;\031\3158\275\310T\306<GK4\274\034\210\035=\275\205\010\275\005\306\320;\230\300Q\274\360ox=YS]<gt\274<k0\034\275\263\333C\273\330\010\350\274\321=\204<\326\2303\275\037\340\351;1/\352\274:\235\005\274\370\340H\275\262\267\201\274\032\341)\275\321\251\010<\363\210N\273&\"\360<8\022\262\274V\320/\274|A\020=\241\275!=W\3008=\226\324\367\273k\037\350<\375\242\354<)\362]\274\321\243\026\275\025-\035\275\3241\371;Og*\275\n%\270\274/\352\230\275\034\203\"=\3476\022=\252`\031=\231\274^\273_j`\275\242\031\262<@\243\323=\003\365\256\275~\034\237\274\363\312\241\274`\252i\275\357\"$:u\025/=G0\031\276\242\023\275\273_O*\274\2314\270\274\010\376\016\275\027\256x<|\366b\274\242\306\255;\356F\034\275p\340\027\275DxZ=\252zb\275\352C:<7\234n\275\024qc=\321\037\250<\026\305[;B\203\212\275[=\245\274.\251\033\273S\304=\274\222w\033\275\tr\234\273\210\261\220\274\005\314\230\272\320\2262=*\223\327<\002\020\225\274)\035\363<#,\264\272i\247\221\274H\250\212;\275#\314;\2517\206\272/ 
>\275\221\014!<`\210=\275\341=\023<\224\331\240\274N\323\262:\343\272\215\274~\264\237<x\034\223\274zy\225;\353\321\351\273d\263\204<\337\253U<\335@\355;\001\303/;a\232\241;6\244\277<\216\037]\275N\365\312;\251\2578<\356\035\310<1\361\372\2744k\306<\204\003\373\274\206\010\214;\336\tK\2750\261\370\2740m\347<\357\202[\274\234\221\023\275\216Z\331\274\220\337*\274j\025v<~E&=|I-\274\366\316x\274\357r\324;\0011/\271\002S\263<+\201\326\274\343\217J\274\363\007\226<\024V\340=\251\221;\274\310\354\3125\330\216S<\252\334\376\274n\224(\275\010^*\275\243\221)\274\351\304\314<\245\010\304\274\216\372\346<rH\221\274|tz=-&\002\275\264\377k<\223-\225\274\036\231\201;\2320\336\273\367\241\235\274\376\0332=\']8\275F\360\314<\017\364\350\272UR=\274e\347\205\274\257\303\241<P\2446\275\035c:\275Q\354\216\275\004\205\300<\356\233\271\274\3614b=T\2740=\013\003\342\274f\r\211:Gs\177=\305\211\224<>\243x\273S\206\250\274\375\305V\275}6\201<\261J?\275\017\031\205\275\003\354\251\274\022\224\241\275\303\003\271\275(\210\217\274\003\017\366\273\325-\316\274t\366\305\273\256R\351\274\3674\357\273\262*\n\275o\320b<\313m-<\023pR=,F5\274-\010\222<\241I*\275#\010\341=\n\031\317\274\363&X\273!<>=\377\325U<\245\352\005<\306\000\225=*\377v\275^\202a<\247\344\255\275\034P\n=\200\345\211\275\007\005\237<\225s\230=\r\205\240\274\342\374\252=\332\251\251<Y\252q\274\032wR\273\313=+\272\222\"\355;\320MB<\250\027\341<c\3359\274\276\370\375<A\255\366;\260E\t\275\245*\256<\nF\244\273g-\277\274\034\341\n<\032_.\274}\236R\274j\317T\275\212\253\334\272\226\027\014\275\321\205\214<T\370c\274`y\017\275\0303/<\3334\265<\370\031\316:\313a\031<\264\232A\274t\225v<\310\223U<6\'\262=\207\360\265<.\275\306\273:\353\330\273A\206\030\273\321\366\302\275\357\332\217\275o\263\364\274\342\375\236\273\234\"x\274\317Eh\2756\253{\275ea5\274>\220F=#[\037;\362\035%\275F\373\255<\301\305\322={\376B\274\277U8\271\250S\363;\353\326G=J\311\020\275\036\274x;\\O\365\275\252\366\277;\275e,\274\266t*\275\324.\252\275\347\357R\275\0217Q=\024\331\300=v 
\000<\376\206\262\274\236\036\370\274&3\234\274\277e\013=\256o\324:9\211\324\272\333W:\274\251\006Q:\302\326\352\273\367n\021\275\203t\310\273<\354\266;\372\357&\275H4\014=\3617\270\274\001\367~\274\',!;\304jJ\274\360\225\205<r\342\342:\355[j;\361\3167<\207\352\201<7\264\"\274a@\200\274>\220\037:\255\230\003<\314\001#;\212\327X:\304\232\014\275\332>\257\274\036\373\230\274\023\0209=\333\325\325;\250\t~<Vl\215\274e\374\204<\204\014\234;\231\214\251<V]\370\274\310\301\021\275\020\254\276<\213f\003\275\351)\031=B\033\"\275\236\266\231\273o\355\363\274\233\315\'=[\201\017\275\212O\334;Z\216\270\275\326\350\267\273\321\247\245\274ct\332<\212\315\022\275\312\002\030\275()\250<\t\255\013\274WR\325<\231\301n<\001\267\260\274\312\262\004\275\367\3651<\345\t\026;EC:\273\335\335\342\274\344U@<\027\250\035\273\\z\221<y\243}\275\337{\236<O[\000<\273\210\334\2742K\322\274\017K\337:\306X\254\274\236\304\327<\230\310\210<\013\013\t\275\244\003m<n\335\014\275p\354\260\274\335<\367;xg\t\275\354B\315\274\357[&<p.\017=G\006a\274KZ\014=\312L\252<\236\366E\274\305\331\346\274a\257*\275\027\212\"\275\240\016\320<\022\005\354:)\351\333\273\321\327\302<u\305\021=(\305\034=K\251\360\274\374\302Y\275C\014\311;t\036S<gy\233:\330\217\006\2740#8\275\036\270\242\2744\307\205\274\220\316\253\271*\321\256<:\322\'<Z_\372\274/\037\025\274\260\0035\275\t\"\306\274\232\273\234\274\n\311`<\0044\374<\237\020\371\274\022\205\230\272\374\345\302<el\372<\3115~\274\351(\263\2737\266\034<5\357\227\274\034\244d\275R\325\225=\305\203\377\274[\224\320\273l\0377\275>\'\003\273\014c_=C\323\250<\004x7<.\377\204\275\234\352\233=\224\3376<s\275\276<d\264\274<\243\237h;8d\217\275\216@\351;\333\237\242\274\272|]<|\347\213\275\304ce\274\025\264\261=.\347S:\336[\200\274\261\276\303\275\273U\007=9M\035<v<\377\273\202T\321\274\312n\335\274\022\222\346<\373z\026\275\216\375\322\274iqd\274Cn\221\274N/\016=a\"\234=FK\226\275Y\271\312;6\022\234\273\244Q5\275`\301@:\250\304\035=+\245\005\276\243jU\274\036Mg\274#\322\314:\177\017\222;\336\220\355<b\000\322<+\211\316\273#*\\\275\020\232w\274\374E\247<\303\254W:7T\354\273\272\345=\275\022N\034=\261\017\370<k\300\t=\251\365\307\274\336A3\274U\202w;\014\2654\273\363\202\006=\034j\r=\207E\307=$\306z=\027dz<1.x\275v\242\014\275\221~r=lP\225;4\214\250<\377\037\274<_\307T=R\333\216\275\336=X:\262{3\275*\302u<n\327k<\262@B\275\2126\320\274L\024\206\275)*\375;n\373\234\274$\234.;\026\273g:\206z\377\275\377\217\\;H\277\210=,?\337<\366\236\335\274G\322}<]\373\033=\260)\022<g\333\335\273.\'4\275\003]\225<\201\004q\275Z\232\315\275\264`\006\272`\304\247\273e\2010\273\201]\320\274\374\356[\274?\250\371\272\036&\000<\263P\007\275\353\"\237<-Fi=\300\362\251;\2416y=\245\273\367:$\240]=\272\340\013\274\014\"4\274\037u=<\331\026[=m\370J\275+w\371<\224\362\212=\360g\327\274\356\363h\275\352\031Z\273\350M3\275\372i\250=3\274\315\273(\0200<\373>\021\275\005{ 
\275\024\316\030=\357\346\241=\362\331)\275\315\"\004;\277\271\t\275\230v\r\275\216CU\275D\312\254<\236\026N;\003\'i=\253\3343\275\211s\026\275\211\257\360<\221\206\374;\236Y\377\270\271X\302\274t\252\345\2742\363d\274\177a\316\274I\017@<*:Q\273\347\226\005\276FS\237;\002<-\275\213wM<\025\253\214\275we\305\274F\315\300<\347\034\211\274\251K7<\333+\214<J:\262<!V-=\277\210\357<v6W\273$\2531\273a\3602<\014\340\337;LY\305\274\201\305$\274\026\'\236<\361M\031<N\265\201\272^\022\034=\305x\300<\340lT<\273\220\t\275\320\360\352<b\033o<5%\001\275\250\314\351\274\356\335O\274Ff@=/\373\260:+\222>=<\364\276<\312\301\203\274\2520\242:\307\223\232\273CM\247\274\353\253\342\274d>\204\274\332\001 \275\361\3363\275\232\353\215<\247\267\205<!\260\022\274\r\325H:\n_\026\275_o\214\2746]\330\273\001\233!\274\315\315\267<\025H[\274\362\370k\274\376K1=\314.\260<oJ\356<%G\377;\273F#\274\227\333\277:>\257\300<tw\037\275\276O+=\210\006\022\275ni\374\274\214p\222;K/\200\274l~\300:M \005<\301\'\246\273\234h^=\322n\230\272\232#+=\202\324\241\271hr\347\274Z\212a\271Z\251\347\274$s\351\273\316k\"=\274\307\236<\255/\234=\363\253K<@\007\241<tl\034\275\343\002\263<{{\220\273\272B\221\274\214\263\027<\326[n\273\211?\034\275r\323D<F\017\t=6+\307\272\214\224,\275\222\376\304\275\341q\374\272\303Bg=xk\223\274\205\177Y<\275\t\357<\233\257\336;\304^\343\274,\203\020=\323N\360\274\235\236\031<\250D\277<T\344\240;\022\272Y\275\003\370\033=\362\260G;\007/\027=\022\332?\274\010&\022=.\225\232\273[6\262\273\n\306\363\274\352l\360\274\340\272\255\273\371\227\030<\325\"\233\274\002a\025\272\035\3541=\371r9<\3506\363<s8\343;%;\030\275\233\356\210;\331\320\251;8]w\274\251}(\275\347h\";\337\202%\274N\254\257\273\277\233c\273\365\025\000\274:\005\230\273[t>\274\202\335l\274>\323\207<\246\300B=\037W(\275\007\344\220;W\316o\2749H\300\274\344\324$=Ywl\274\2123\023<\336[\355\274@\256\220\274C\343t<\3646\031\273N\242\307\273r\251x;?\366\023:\017\334\344<khW\274f8\271<\356TI\274v\355\247\273\217\031T<\034\272\\=\375!\312;\021\236 \274d+X\275h\235X\274\025WK\275\"\025n\273p\223R\273\027\354\3249{L\232\273\200\ny\274\2064\020\274:\200\031\273\007D\031<&\310U<x\267\215\274|L\005\274\325h\226;\001\224H<F(\374<\016\232\231\274I=U<\2742?<V\222J\274\300\340I=:a\027<V\026\215\274x\022\276\27421\221<n\267|\274:\\\004<k\336\002\275\212a\202\274Gj/={\215e<@[\212<\"\222w\274(~\304;\326\307\234\273o\031\005\274\256\001\006<f\3202\274\253k\373\272\205\331R\274h\263\330\274\346r\352<\224.\244\273Z\333\207<\213\223I;\200})<\352Z\t\275\317\316\002=\370)`\274+\343!94z\224<(Z\"\275\264\r\023=\010\351>\274\253 
\312;R\366\035\274J1\276<\227\002\006\275\350\367\216<\226q;=\270\263\211\274N\374\036;\270\370\014\275=\331\301\274\2515:\275(]q\273\313\205\335\273\303\'\025\274\314\377\240<-\344\002\275\331\224\t<\264\272\350<\351{M=\022\233\025\275\"\371\036=\253\253\031\272\265\374\303\274\221\255\200\275\227\2156\274\370\330\236\275\234\375\364<\365d*;\315\200\216\275\366#8=\352\321\275<\354\232\312;\014\267\205\274\277\002W\275\010\344P<;\003\001\275\250\237\030=\267f\302\274T\271\240\275\312\307\t=\"\374\234;\376\227\211=\022\025x=\373\247\230:\250\0067=,\220\260<\311\2705\274j\315\221<\344\302\365\273>\225\315<\326\246s\274\252\375\234\274\302\245%\274\240\321\000\275\"\331/=\375\320\202\273\242N\002\274\033\022&<\222\006\006\275O\242];\273<\350<\214tG\275K\r\216;o\3678\273K\337\204\274.\257Z\273X\303\016=\r\276\016\275\217\203y\274Nc\360\274[\203\253<qG\367;\252%\315;U1\256\273\330\005\200\274s\342\206\274\340\371\260\274\262o\022\275\317\206\211\274\235.5<\366^\335<\361w,;\242\220\261<>\n\245;\237\324%=\321m\004==g\236\274\324P\253\274\036_\234<P\232*\274\214o@\274 \311d\271r\346\323;\306\216#\275iW\214\274\365\310B\275/\2376<\250\035\275<\226\276L\270~V%\274w8\021\275{\360!\274\200\253N<\233\303\335<\342\307\206=1\r!\273\341W2\273\3217\364\272\300K4\275,R\323<\005\341\212\274\377\306\256<v\200\373\274#q\200\274o6\027\275\254\253\030<k\231c=\236\032L;\335\221\'<i\223\323\274\221\200\234=\321\202\366\273}I\241\274\357\223\003;\277\204\235\274\221\364e\275y(\316\272\tc\277\274F\020\301\273\210\331\340\274\263\237\010<j\t\344=\230Y\212\275(\251j\274w\3221\274\2108z\274\n\"c;\361g\246<i\353\250=\354C\227\274\201\033\030=1A\350\274A\271\003=\014p\247\273\315=l:0~;\274%8*\274\263S\234\273\234&\246<s\220\021=\272\361R=\307`\t\275}\365\245;\024\201\337:\307\3243\274\350\247\006\275\307\306\362<\235#\006\275\273Z\217<Vu\024<\'\017\032\275\374B\027\275\232\317\247=1\332;;~\201\220\274\341~\001=)@\255<Z\304<\273\022X\'=\242\367\330:\337_\244\274$m\212\274\033\344\211\275T\264\341\273\006\010\240\273E\242H<\036\314F\273\220\341\254<ZM4\275\200\210j=\'Q\235<\367}\335<[\323\234<j\220$\275\014~\316<\"\305/\275?\327\036\2748\014\274<\234\213\207\274R(\345\274cj\240<\240\204\240\274\2043\362\272?\363:\275y\352\252<b\364\225<aA\313:p\372\2169\223v\n\274\003\033{;\350\"\274;\355%5<=W\336<\360\315\350\274\233\261\210\273\374\236\277\273\37390<W\254\002\275<;H;\022\246\222<\357n\374\272\350y8\274\'Q\337<\205\307\232\274\2021S\274\"\352\311<\204gq\275\372\344\267<\307\363\343\274\304\177$=\367\266\211;\315\214\223;l2\032\274\343cC=\021\266\271\274\362\237\n=\217\254\026=\223*0\275\211\037E\274gV\204\273\020\352\022\274-9\033\275\262\006\363\274\356b\023\274\305\016\033<\027\014D\275\307\220\353\274\007\030\033;S\263}\275\020\223\273\274\030\305F\274\337\026\004\275\231C\355\274\026\266\346<\256\230\017</\000\257\273\331\227$=qjq\274\224\236\300<\303\226\032\275\245\323\237\273\244\r\210\274,\210}\273k\006Z\274\323\034\265;\250\024W\274:\330\253\274*^#\2747\343Z=uC\267\274{\253|;U\326\\\274\252\314\006=\317+\262\274 
/\232;vL\034\275\0162\232\274\353e\311\273\3243\330<\"\000(\274\030\220\321<JF\213\275-#\221\274\224]\363:\327\323\212;\031\036\213\274&_:<\213\206\333\274\314\377\330\273]\321\237\27474\256\274\353q\017\274\335\351q;\323\350\014\273\0214\364<\212W\236\274J\243\023\274\242\377\265<v\332\257<\240\017C=q\017\255<\237\337\346;|\314\016=\225\017\314\274\034\365\367\274\2139\263\274\314&\370;7d\367\274bE\006\275.&\343\274\276\366\337<w`\030=\374\225n=fK\272\274\037\331\223\274A\374\247;\227t\204=d;\\\274H\251\351\273\256\021\336\274\024\312P\275O\365\326\273\006\022>=T\214\030\276\277\001\224;=\207\374\273r\276L\275\006\200T\274\322\321\203<Q\010\204\272\277+\366\274]J\241\274\177\272u;P#\332<\205\347\207\275;\241\207:u,\216\274\310X\304<B\263\'=uv\273<\016\272\243=\226Z8\274o#\217\274\017.\212\274\373\025\031\275 e\320\273>\020y;\300\215[<\001\205\t=A\271)<\000\'\036\274^\257\026=\177\377E<\211=\265\274\037\315|\272\3550S<\177EH\274\263\271:\275\"S\203\274h\345\021\275\273\233\246;\360\363!:`?0\273\267\014t\274\200D\271<\302\345\325\273\300\306g\274\204\237\232\274\265\345\201<\002\363R<\372\214=\274n+n\273=]\367;\336\245b<\354\341\231\274I\344\214<l\273\327;IH\022\274\230;\324\274(\004\016=\226\210\003\275@)\225<\302\267\023\275s\031\024\275\016\220\001=27$\274T\205\003\275+Z\200\274v\350\317\272\'\215I:\275x2=\347\244\364;9\354\232:&\004m\272\371\014\202\274\257\231\213<j\265;<\235\245s\274\010\313M<\032\312\227=*V\030\274\330]\256:n\273\321;,\032\202\274=\223\031\275[\273\371\274\241rS\274\227\357\004<4\205g\273\004p\230<\367\022y;\216H5=\214\177\330\2747\343Z<P\221P\272\253\325 <\3377;\274\370}d\274\351b>=\245\375k\275g\332\325\272\216\267\203< \371K\274`:.\274\326)\272<\303\374*\275\203\303\037\275\243\347\215\275\247\356g=\201s\245\274\025\245\231<\241\327\260<\267\031\240\274B\306]\274X\240v=\354\365\023=\262\202E=\342\251\232\274\235\354Y\275\033\261c<\204\244%\275{\333\225\275FL\220\274\302\246T<s\301\267\275?%n\274}\t\265\274\022q\211\274)O?:\374\200\302\273\'ce; \340\320\274\250T}\2738\210\251<\037\230\010=\032|\204\274\273\364\274<\364\272\370\274\242>R=\031\312\206\274\371\321\016=\314\027\217<\270\313\214\274\232\260\306;\354\024\240=\327\024\217\275\373[X;)5y\275\314\226\352\274+H{\275v\\j<g\032\363<\357\362\237\274\2262\215=\001\234!;:J\327\274\317w\r<|\204\312<l\227Y<a\240s\274\306}\024=\212s\325:\213 \203<\223\266\305\274\211\367K\275+u\321<;<.\274\207\233\366\274\251`\201<\310M%\274^\242\261\274\204 
\'\275\210\261k<ZH\236\274\317\246U;\235\367\375;\224\253\353\274!t\032\274\323S\233<\035\320\332:j\r\363;%\253\216\274\246\007\356;l+\324:\221N\346=\203!\272\274t\230<\274Y\311\214\274d\013\017\274\266-\314\274j\235F\275\371\033\266\274\2769\275\274\337\205\214<j\330\035\275Q,\352\274{{\213;\345\262\024=\264\273\211<4\244\263\274_\354\234<\301\'\233=0\026K:&J=\274\366ke=\211\346\023<\205`\344\274\323;\252<\030\265\366\275a\017\021=\336\032\276\274\243~\214\274\200#\253;\325\230\257\274\255\211\243<d\036\323=2\365\375:\3420!\275\363a\243\274\231U\026<\016\237\253<\t_\r\274\204$\373;\020K\005\274\003\2170:R;\343\273\016\335\013\275\236v.<\223\226\223;w\3506\275Y\370t<\\\225\240\274\350\317\214\274\270\"\225;wI\213\274\327vO<\264\267q<\267\242\230<\327O\204\272NA\235\274\265\033K\272.\343\213\274\034gg\274_\213%<W\301\001\274\302\243a<]{\354\273SQ[\273G\342B<D\361\313;\251\224\212\273\214\024E<\301\021\031\275B\340\027=\266q\354<\'L\001=\353\'[\274M\363U\274\016\035H<\254\023\'\275\344$\003<\225z(\275\344\224\261\274\316\017\223\274\361-\r=\343\231\024\275\361\020\257<\035\353\237\275\200X\345<v\217\264\274\377\245\025=\266?\313\274\221\tt\274%M\254<\252\313{\274I_\311<\317\356\201=\224\\\204\274\313T-\273\177\266\267\274\304@f\271G\255t\274S\035\022<\017\265\017=\237G;;X%\334\273\021\266\003\275e\212\206<\377\326I<\251\201-\274\275\340\304\274\257\302\233;j|F\275\026\337\334<\232\331\213<\370\257\000\275\267\301\222<o\372\020\275\2210\352\274\342\273d\272x\366\034\274\326\315\307\274\314V\034\275>#\251<\216\302\267\2734\2259=\031`\354\2730N8\274e\353h;Rz\340\274\032(\323\274\202\344d=\013\016G\273\351\310\302\274\035\375\242<\200\354A<\243pV=J\347\216\274>\244v\2752\373,9\256\003A<\2325\336<\207\r\261<R\0019\275\341\264\255\274\005\267\212;\302_)\274\247\2765<\301\301\250<\343\267\360\274V\322\262\274\203\232A\275\354JE\274*7\263\274dS\264\274\007;\023=t6\353\274\260\201\205;\271\266\260<\272\203\363<\242\326\034=Wg{\274\'\323\222<\300\030\220<\r\363\243\274\340\004\346<\224\216\225\274\241r\355\272\237\352\205\275\310v>=\377\3223=~u\305\272/\246o;jAo\275 \3505=\364\373\225<|(\275<[\315\013=\016\372n\273Y\2557\275\351\222F<0a\362\274\370\0263<2\302x\275 [\001=\365\254\274=\265h\032\275n}]\273\215\364\226\275\232\0036<:\277*;/\177I\272\344OA=_9Z\275\360\264;=\2632z\275:\0010<t\010\021\275\273`\006<\302\363\223\273\264I\241<=\221\346\274\2034\370;\342\301\344\274^g\222\275\"\214w\273\364\312\273<\354\252\340\275<-\000<\227\202!\272\314y\312\273\013FQ<\320\037\351<\023\247r\271Hy\265\273\\E\310\274\336\240\224<\r54<\004\323\023\274\324M\334\274f\323v<\243\245\257\273T?\357<\247\036\t=\336^\205=\222q\324\274\267\352=\274\376\265*;\251A\300<\352;\370<\363\221G=^\325r;\247\346s=5A\005\275a\206\246\274Z\256\010\274E8\324\273\323\270m=_k\005=\016\345A=\242\261\025\275n\236\003\273\372\226\217\275\272\232\274\274\274~\210<\272\021\017\275\320\274\025\275\320\320\032\275?\276\026=\0013}<`\033\320<\332\310R</\246\353\275\364\001;<v\177\270<\331\001>=J\214j=\366\2261=\374\321\225<\216\372\027<O\300\274\273\302e 
\275\346\344\316<F\277\026\273G{\351\275,\366\027<P\020\t=\203S#\274s\364r\274\233\017\033;\354Z\021\275!\303b<\373\036P\275\337g\023=\373\320\016=\301\223&<\262)\177=|\233\310<\336\202\257;,Z\"9%\362l<\235%\202\273\254\376-=&\0226\275\'e\262<W\316\231=\342\333\247\274\364\334%\275\247\360P\275\327mZ\274\000\274]=-\212\260\274\004i\033=uC,\275R^;\275\352\262\217\274\037\272\206=\031\222-\275S\364\037\274\337\215/\274\340\373-\275\242\347;\275P\365\004={\201\340<++\016=\256\261\001\275\360\262\"\275\347a\232<}\001\205;\357AV<\315\372\005\275&.A\275\016\353<=\306\243\272;\001\027\365<;\2131\275\027o\251\275\231bN\272V+Q;Z\237\242<\327]\\=\347\250\277;Qk\025=\350\033\026\273\036\202\327\272Dq\027\273\243r\272\273\310-\002=\335\346\223\273\364\223]<w\267J<\344\372\366\272B\311\210\273\335}$<\313A\003\274.3\334<{\250\202;U\347\r;\247o\001=OF\032=\374\026\261<dm\264\274l\344\206<\256\263R<\337\324\204\274\220\312\245\274\237\211\2069M\031\214<\220\303?<{8\250<\324\252\002=x\221m;\356M\000\274\331\221T<\355}\225\273\254\0063\275\374\321\241\273SP\242\274\030\036`\275f\022\275:\365\376\335;8\210\326\273\304\226\241\274`\325\215\274w\230J\274\326\304\327\274\351\001\013<\235c\344<\331Q\034\275\242\036\214\2730\354\023=\271`\310;\271\243\266<c\t\\<\200\270\371\273\240+\261\274\371\2311=-\242\325\273;6<=\360\312\233\274\356\215\263\273\022\301\322<\342g\230\274%\006\210\272\236a\r=\246\337D=\016f/=\356\363\224<\210M\303<\371\223\205<\234e\304\274\321e\343<\322\366\337\274\214\202\214<\024\212J<\271m\316<\347\000\215=\363\266+<sQ\306;(`\022\275\t\255\025<r\234h\273\004\225\205\274\\c\214\274\'\226\036;\216\266U\275F^d<\346\234\261<C\2447<\225C\243;!\322\204\275\352\3741<r\211\004=i\334L\274\223\302\374;$\252\207\274\262\346\017\275\034\205;<\376a\020\274\251\375e;\240\017\272\273#\267W\274l\242\276\272G%\013\274c\236\257<\031}\363<-/\031=\266\241d\273>\367\213<\312,\262\274\233#\r:\274\374\210\273!4\001\273@m\341\274v\301\027;JV\254\274\334\004H\273C9.=\253\024\223<\224\231\376<K\316\251<\235\316\211\274\250q\345<\223~\001\2741ws\274}\202\306\274Eh\324\274xL\020\274j\265\362\274\374g\'<f\362\026;\376w%;\362s\\\274\313\272\213:\010\325?<1\331\274=;-\200\274\350\224n\274w\363\244\273\031\241\301\274[{\337<\250\330\321<\203\302\311<\312\024\314\273\340\177f\274:\323p\273rN\003\274\236\317\216\272j\363\020;\254>]:O\323k;\r\350\354\273\206\343\330<\277!L\274\373?\237\274@E_<$B\n=E\232%<K\023\200\274\023[\026\275\375\"\205\275\013\210\010\275<\351<\274\235\\\301\274\255S\006:xa3<B\255\264\274\335%\016<5K\302<\\\r\313;\271\223\t=;\234\306\274\004r\023\275<\250\022<\215\201\002\275\302~\372<c\213&\2717`U\273\340\256\020\274\260\314\227\274l9 
=&\215\324<\277\313\023\274\373\002)\274\251&\260<`\']\2714\226\004\274\3045\001\274\363g\337\274\001wK<\331\033B\271H\221\033=\177\250\244\274\0046\237;aw\t\272\231\215u\274}\324Z<\240\304L\274\256h\361\272\265\374\\<\350-\226\274\001U\233<\340\371\243<\335\2735;cW5\274V\016\331<Y\326\024\274R\026\030=\342\3229\274\"\014a\274\322\233\204;u\303\226\274i\003\351<f\007\254<\253:a\274\247\317\221\274\320\330\007=\225\324|\274\267>\345\2745\317\366<-\275\275\274\276;5\274\315$\034\275\357v\240\273}\2157\2758\347\251;w\215-\274&\007(\275R\260\326\274]\337T\274\205\272q<\244\032O\275~\343==8\362\035\272z\371\233<\276\016\245:\000\224-\275\3032\033\275y\340\'=R\327\216\274\354\305\017<.\276#\274\240\030,\275\t\033\307<\366\223\027\274W\207\330<\035\366$\275\322\002\017\275\026\300\311<\217J\010\275w\230\035=\205\223\t=\024\332X\273\2345A=\010\267\210\274\000t7=\272j\234=o\225+<\243\217\223\273\216\n\213<\205w\341\273:|P;.)\212\274\255:\226=b\366*\274\271!#\275\32466<\352\250\350\274\210\265\237<\222\264\337:wU\313\273M\000\t<q\272I\274\351\263\223\274S\345\023<\013\243H\275\np\205;\323\251\233;\336+\202\274\254\335\021<>Z9=p\357\366\274\314\034\323\274\022Z\005\275\360w\336<\300\3577\273S\313\204\274\016\252V<,cJ\275\347\017}\274?\3254<\222*+\275\376\270\200\2747\243\372;\222\203c=s<\003\274\334\014\217:W\337\031\274\021\272\202=%p\t=kP[\274\240vK<\351\024\t==UK<\245\027\261\274\215\200\036\273\201\343\004\275\345S=\274\205gJ\274\256\353\211\274_O\214<\236\220\362<\t\263\364:\323\306\206<\255.w\275h\221H\275\026\rW<=\344\210<\344w]<\260\272\255\273\006\'\251<\200:c<\344\241\327\273;OC=\236\003\204\2739\010\204\274,\244)\275\214\232\013<s\243!\275u{\320<B\301\014=zd\243\274\356\322\264;S\344\243\273\373\267-=\330si:\003\327\252\274\t\254y\274$U3\273\221\352\203\274\265R\004\274\360\367\210<\306\377\240\274;\366\321\274\367\001\330<\314\301\206=T9\227\275\215\323+<\375\310\236:\235\367.\275/\262N;\215\n\342<\245YB=Q\360\235\274\317\352\333<p,E\274knj<\351z\275\274\033\3505\274?\341>=\3207\315\274\224\242\246\273p\225\r=C\376\004=[\303n<\202\257\246\274\205\342\340\272\256k\256;\312C!<\333\340J\275\001\351\206;\000\265\r\275\220\037x<\250\263\236;\037\020\007\275<T\314\274l\250\314=\004\2732<!\213\n\275\225\274\000=&\037:<@\374\235\274_E\305<\214~\361<\256A*\275r8!\274\254\241\205\275\312\267}\274\275`\367\273\234\302\200\274\211\256\277;\023n\327<\271\247\020\275SfS=n*\336:G&\260;\000%U;\304\320\022\275j.\332\273\371t\031\275\251\240\244\274\216_\354<\372\2719\273\312\371\002\274op<<\231c\274\273\304\367R\2733.<\275\023\022\323<\226\367\320;\312\233\227\274\020\253#<\302\211d\274=\017\311\274\030\306\023\273\352\024&\273\tX\321;\371\211\372\274C\252l\274vI\201<=~\037\2738-\254\274\367\2479\274\017?\231<\304\323<<p\317A<\323\0271=\321\325\n\274\225+\221\274\270\300s<uQy\274\235-\210<*v\360\274\364)\347<\370\010\3719\023q\241<Vb\023\274\n*\031=0=E\274G\202\024=\027d\016=\343Hc\275=K\\\274{n\026\274\352\334\351\274\233z\005\275\025\223\364\274`%\232<!@M;\342kX\274d\275\034\275\020\236\274\274K\2647\275K~A\275\324\231\016\275k\362\320\272\206\002\352\272NP\251<\342f\354;\351\367\233\274 \246\327:\221\247\031\273\210\337\004;\375\310z\274.t5\2710t 
\275\361\010\223\274\236/=\274=\243\200:\225\343b\274\216Mt\274\302\352\227\273\375\nO=SO\226;\244+\205<\376\273%\274\352\250\033=u#%\275\320\350\337\274\206B\004\275S\311Q\275\205k\220\274\336X-=\351(\265\274\007(\353<\251\377\200\275\205\311\326\274r\023\336\273\356\302\350:\212H=<\250\265\213<\325\035C\273\300\335\306\274\273\274\221\274S\036?\274\"\005\t<\342Mi<l\007\t\274\013\377\326<\026D\244\272\3047\037\274\365\307\025\274\304\242\014=*\364\343<\231\241S=\336\235~\273\321\370\224<\332\250,\2753\001Z\275\235\n\356:\036d{;aU\001<f\325\032\274\306l\315:\204]\253;\310\t-;\217~\233=\347\273\037\275\326\325\006\275Y\375\371\272\2277\013=&mV\273\027S2<h\314@\275\365\366a\275\273\374\267<b\201\203=C1\313\275\007>\341;\235\235\013\275\000\375\337\274\014\237\370:\325\331k:\211\277\203<U\334\275\274>F\"\274j\254\303\274\364N\322<B^o\275\241\270\245<\277\324\274<hf%<\276\021\357<\265I\351<\nA\336=tB\370\274s!\022\275\260F\226;\336,\265\274\364\300)\274@\336W<\017\251\215\273AP\234<\363D\021\273\300\370o;\010\202\007=\200M\210<\316\215H\274>eZ\274\0334\240</\202y\274\230x7\275|\217\316\274^\305\254\274\023\307><\013`\335<>}\303\273Y\212\032\274\'\325\373<\3024\031;\331\2343\275\372\003\371\274\225\254\247<\351\334\305<\301Y\365\274&\357\277\273\r\202\337;\267\370e<\233\335\003<\2276\036=\255Y1<~\3146\275\222\243\206\274\331\364\335<)\'\257\274\001\r\002;\324\024\\\274\273\224\017\275\232\256\240<N\020\332\2724C\210\274y\004Q;:.\266:\271\254\340\273\207\032P=\254\211\006=\017\271@<\266\357$:\245\274\260\274\006\352\226:\333l\360<\322\205\261\273Q\337\001\274\007n\367<\002\274\241\274\305d\034:\346`Z\2721T\376;\014\302\270\273\001\345\333\274\262\367\211;e\304\026;\251\036q<\214z\223\274n\\\274<\340\305\236<\255\377\340\274$7\216<\337^\346<\200\020\205\273K8\344\274TJ\t\274]\274\032=\320\252\024\275o\007\327\272\002\303\307<\275<M\274!\362W\272DN\251<}p\301\274\364\270\005\275\234\363\206\275\t\330\214=\234*\231\274\360\022\323\274\335)\341\273\264\323\200\274\006~q\274\2237P=\361h3=\302\304\227=\210\013\322\2742^j\275\321<E<\342\\w\274\030\334i\2757\033\370\274|\356\272=,\204\214\275\323R=\274\221\346\332\274-A\340\273\030j\311\274\205\367\367<P\213\271<\236\221\225\274\002kE\272qM\243\273eD1<\000\021u;\030\250\304<\223\217&\274(\271\300\273\251wx:1\214b=?\223\352\273\352\331\'\275x\204\325<_\323.=P\273q\275\226\230\320;\311E\001\275\357\035\243\275\313I3\2756|\005<\r`,\274\346\347\242\274@\302\000=$=\236\273r\261\026\274\305\231\305<t0\016=A\236\202\273N\234\025\275\026\322\212< \304/<\264\037\302\274\034O\220\274\306`3\275\356{\322<\300Jx\274\010X\234\274\177m(<T\321\355;d\203\335\274\022\351\344\274\371\333&=\277\000\230\274\030y\r<\264>\254<\271\336<\274q\002\353\274\270\323\207\273\205\276\356\271 \330\300\274\340\261\035\275\241\215*\274\036\244-;,\317\254=]\261p\274\026\277\340\274\224\373e\275\2037\242\273E6(=~,c<L\200\305\274\312\265h\275(<\301:}\325\013\273[\376\024\274~\035\325:E\324\034<\343\266&=w\217)\2749A\014<6\257#<\251\301\365<\367\003,\274\'\374_=/t\335\273\364\215\202\274uD\270<\234\266\317\275t\002{=R(\323\274\025 
\321<\352g\356<\343\301\224<YZ\340:\310\243t=\300\310\227\273)\371L\275d\007V\274f\027\346<A\325\022<;\255d\274\206\321\217<>\204\306;\300\\\276;3\002\256\274\370\213\303\274M\370\032=\025\216\201:NGV\275\270D\270\273\007u\211\274\240\356!\274!r\215<E\356y\272\320e6<\215<\037<\371\326\027<\270^f\274_N\240\274\031\253\212\273`\030\242\274\001>\344\274\n}\247;\0216\322\273t?\361<\\\035\211\2739\306\242<\341\'c<\232\344\247\274?\320\347\274\005\275s<=)\033\275J\246D=y\232T;I\030\032=e&\232\2748g_<[\377y\272\017 J\275r\313&\274\212)`\275\0048,\274\332\322p\273\013_\327<\235\\\3479Cz/<\0023~\275\366\017S=\303\335\375\274\227\362\023<_\351\017<\231\017\363\273\356@\215<;\337p<\263\203\007\273\310d\255<q\313\037\274\033\255t\274T\021\332\273D\263\241\273\220-\234\274\260\357\234<9\213\353<S\351!<W\024\027\274\312\244;<mb\251:Y\r\001<\2430X<c\227\271\274Rk\275<\021\374\027\27574U<1\374,<\026C\277\274(\313\215<$y\201\274\207|\222\274\312\363\247;\016\013\005<\207\277\207\2748^m\275E\327\327;/8Z;\346[\t=\371\344,\275\332\271\326;\003\032S=Hm\275\274\006eC\274\265~]=\330\350]:\022\215o\275\222{\342;\023\340\n\275\006\017G=\342J\330;w\244>\275\243\367y\2748@\340\273\013\216\260<z\n~<\000\004\267\2745\000\306\273|{\335\272\2240\316;\332\tf;\024\243\240<\332C\210\273\210a\315\274\316\247\242\274v\266\n=0\306\314\274l \244\274\213\332\321<F\t,\274X@\202;W]9:\246\311\320<\220O\217=Z\014$\275\204\352p;OX?<\323H%<nP\344\274\371\356\350\274\204U\236\273\031hc\275\'V+=_\255\n\271.r\010\275\005\007\006\275\341#8\275\311\325\221;\250\246p<T\302W<Jd\024=\024\371\016\274\022M\216<\345\240\242\273\345b`\274\335\251\342;K\ns\275\364\345\'=D/}=\0064\203\275[%M;\304\352\024\275\227F\213\274\345\276\237\272} \325<\331\243\232<\342wL\275\031-\363<\220\300\025\275J\034\277<\273w&\275\326\256\036\272\030\325\300\274e-3<D\374\010<eA\213:\032k\342\274\2651\212\275\020\214M<\025\316\344;[\024r\275\2230\371<\371\366\265\2740vg;g\017?;z\333\307;wI\331<\031\211\224\274\251\201Y\274\354C\341\272\270(3\274:\357\005\274z\324\037\273\342\332\310<\365\374\n\274Zt{<\321\204\334<\247\371D=\255\354\266\274l\264\311\274VY;;\351\333B=\221\314\276<v_?<\266`l\275\314\032\261=\316\320\037\274E{\326\274\241T\230\275z\245\263\274\360\337\200<;yb\274\263\002W=!\035\345\274\205\027\231\273/1\200\275HTW\274\326\205\320<\272\331_\274\211l\316\274\320\033@;\311\265g=:x\340<#\371\002=y\321#<\213\234\207\275\027[c\274z\257\342\2743\005+=c\016\241=\006\217o\274\376\201\253<\006\007\324<\314.F<\230\315@\274\332\037\031=\233\3554=m\020o\275\001_\244<\264R\205=V#\261\274\2637\342\274\356~.\274}\272\214\274*1\303;\364\022\177\275M\336\320<UQ\t<s\223,<\275> =\033^7=t\217\341\274I\212\n<\327@:\273\213\035\316\274\027g\214;\220t\254\274hW\216\274f2\211=,(\010\274\371\217\300\273P\312\216\275xR\177<\217A\273<G\251\\\275\243i\027=\021\330z\2759@=\275\034\2470\275\023&\021<\236\222Z\274$\211\003\273\306\324\350<\260\375\020\275h\215\207\275\342\312\215<\375a!=z\273\364<7\273Y\275\026<&\274\213z\"<\233l\004<\013\250n<\246\333\312\272\301\002U\275a\032\203=\364F\030;\370\274\317;@J\362\273\304\373\203\275YZ\377;.n\t=rt\037;\227\007\245=&\346\316<\343\376\201\273\223\324.\274\337*\301;+\272\260\274d\277\236\274\3620>;:\002\266\274\212q><~\333\357<w\373_\274\207\315\205\274\nJ\013=\351\3156;\340q*=\331zi< 
V\235\274~\025\242<%\035]=\n\362\277<Rb<;\243\225K\274\240L\265\272\334^|8@\355,\274\346\321\243\273\336#\010\275_\332\207<\264\037\033<\333W\017=\265k\253<\330j\243\274d8\276<\251,\246;(\264M\275aTG9\316S\234\274\270\376s\275\255\007J\273fY\364\274\314\347\241;\212h\304\274\357d/<\262\332Y\274\031\257U\275\027\2356\274\001\345\264<\'[\372\274\313K\262\274\370\363\335<z\211\204\274\266\263y<l\205\276;\310\315\373;v\n\264\274\312\365\031=1\316^\273\303\206\202<\334\r@<\323\353\337\2742\317\255<\341\265\303;\206\304\214\274C\341e<z\032\031=o\353}\274\374P.<\013+\234\274e\227\355<\216;\203\274p\245{=BC@\273$\266\003=\300\330\303\2749\347\034=sqy=Wg\224;\235\222)<\371\253\200\274\216>!\273(\333\036\274\243\301^\274\260\024\343\274&(\325;\345Z@\275\360r\324<\376\n\206;\213\030\214<\206\301\001=\242\257\362\274\305\303\274;\356\006\237\273|e\025\274\251e\212<\032\0105\275\334N\244\274\010\370T<B$S\275`x\333<+m\343\274\'R\022\275-\307A\274!\344\r=\036\373\024<\314\314\021=\2013\003=\360K\007<%\377\010<FmW\275z\3574<\374&\300;\212\362><v\017\020\275/\221\024;\021/\372\274[\314\205\272\032\252\363<\300\200\030=_\235\331<\277\206\313<\236B\207\272\220\250\371<z\305[:F\022\302\274\275_(\274\221\0300\275\300\370{;\256d%\275\202\316\333<\264\364\246\273\262\004\244<\264.\201<\265\312\361<4PK8,\347\035=}\223\032;\\c\317\274\246\220)\275\353k\t\275;D\333\2731\226[<h\017I\274\354\202\357<t\270f<\256\224\223<\334o\036\275\263Q4\274-\267\250\274\233\376S<\315\210\217=u\330R\275\266\260\\;\322X\272\274\"\364\213\274sU\010=\242\360\002=\354s\300\275E:\177\274\336L\307\273\371\246X=L\315\374\273$Q\201\274\222\343\032\275\261\005\275\274T\310\177;\251\020\020\275\356\227\032\275\257k\346\274A\362Z=A\201\215<\351\004\353\274\324H\030\274\326]\231<\'\177\360;\203\326\320<\377\247\203\274\214\271\317\274\006<\343;\342~W\274\037\212\332< Ma;d/\261\273Q\316\255<HW\211\274 \307\000\274_\367\231:\377\217\325\273\332Y*=\033;\320\274xd\225\274\002\032d=\000\315\3549 Se<\330\330m<\'\007\001\275G\364D=\203\337\347<\004l\206<U\215]\275\331Y\237\274A\366\214\274\347:x\271@\243\277\273\324s\265;:o0\275\311\351)\274\343U:<Mv4=\343\023\241;xR1\274\036\2054\275\013G\373;\264\002\030\275\207Qd\274.\351\031<\274+\245:\257;A<\226\307\221\274*7t\273@\214\344<k@\035\275\363}\\\275\340w\203\274\255N\236<UK\302\274K\264Y={\034\267;l(+=n\027\362\274\000\2331=\206\212Y;\331\332K\274\237\261v\2753\253h<\3153\246<\222\2542=\342\243\020;?\211\213\274\273\n&\275\2661\254;\'yb\275\217\306[=]\343\211\274F\274H=eN\013=\2471\010=\250\354\257<Z\333\325\273\024?#\275\000s\271<\233D\210\273\335j\037=\367\347\333<j2S\2752\362\207=\231 
!<\3768\357:u%-\275\354\375\031<\r\014\276\274\002\256\352<Z\367\315\273\214bn\274\242\244\001\271\000\343\370\271\023\243\237\274h\256\276\273ZI\307\274\317\350y<{f2\274\036\246-=Kx\373\274\232g\270<\3534\023<\033\245\037\274\343+\220;\215~v\270\235P\223:\301_Y\275\361\007\261\272\225\251\337<\241\265\242;\036\365/\274\036\006\263\273]fI\275\313\341o:\225xd\273\265C\325<1\254#<\022\030\256\272\206\350\376\274Q\216\227\275\036\035\022=\023\341\326\274\017\226X\275\366\271\220\274\301]\272\274\355\023\017<\267\326\202\274i\245\324\274u\376j\274\324Z\250\274\t\253-=\3309~\274\006_5=D\306\255;\241\251\333<Lv\223\273.\2518=7\341\205\272`j\254<\377*,\2751\\\352\273o\322\036=\243\326\271\274Ehh\274\227\004\256<\322\2007;\346\327\243;}\020(\275\310f\273<\220\032{=NW\005<s$`\274\277\375\024=\344u\253<\221?\254<\344\323\352<\341\333\326\274Vf\245;\215\202\2258\213$\301\272\370|\225<8bA<\267I\010\274\352a\357\274V\265>\275\211\r,=\344V\355\274m!\030:\307\332\r\274j\352\270\274P\r\001=\306K\341;\302\335{\273\315f\242<E!\213\274\006\363\351:\203\345E\275\226\r\n\274\257X\273\274\305I\201<\267G`=_\337\271<\3246F==\023\255:\345\316\267<\305\246?\274q\272\217\275\364Z\377;\347\0130<a\026\016\275@\257\032=`\025\t=4\007N\274\362\233\247\274\252]\322<\315\317[\275\3206\210\274Y\210\035<U\205<\275`\301&\273\212!\220\274\252b5\274{\204\273\274\364\013.\275\033\023z=\207p\276\2747\354l\274I\307\351:\305\r\246\274r\323)<\241\020\213<\365*\202<\032\025L<\302\223V\275\200\363\217<\375\207\265<\216\254\001;\344}$<\270\224\203<\317#\315;\370\t\346\273.\032\346\274\224\035\341\274\365\311,<\270<\216;\363\257\342\274\0354\361;B\215\333\274Q\333!<\'\277\263<1\340D\274\314 W<\271T\035=\t4\332\273\016:\203\274:Ak<0\262\353\274\222\257\023<\004`\330\274\014\021c<i\337\263\274\n{\277\274\001\202\333<\013@\212<\031*6=\212\216\010=\242\321l<m\370\334\273k-\226\274\370\270\343<\247\0101\274\315\265\333\273\016!\027\275^\377E<\\}O=\335\331w\274\023\213\013\275\373\325O\274\254\rh<\364\363\233;\346\265\334\272K\311\204;\311\365\r93\270\246\274[\254\376:\177Q\306\273\357\325\273<\224\\\376\274E9\007;\036/\336<\236|3<\347\272\243<\233\3609\275\241\303%\275i\377\207\274\360\304\214\273SmV\275\215\270M\275\213\244\002\274\240\326]\275\314}\025=\032E\205=\255\277\255\274G\243\200\273\227\024y\274\031\t\325\274`\217\021=\233\255\230;\243\327\314<\265\275X;I\255\215<0S;\275\357\025\030=\360\217\226<\223\177\357\273\0044e\273s\312\261<\223\214\236<\247\023\t\275.T\224;\347\247\211<\362\276\313\274\001\251D\275\305\014\352<\257_\313\275\230\314l\273\315\227\005=\0300\377<F\243^<\272\267\016<\341\365\013=\223\355\377\274A\277\220\274\177\'\023\274\251\301+<\201\263S\272\314=\223\274\030\265\245<\366$\302<\336i^;\306u\330<\371Gl=\264\013G=\0348C\274\312q\217\274X|%<\025\204\211\274\305\356n\274\272\265\005\274w9y\274\340\211d=3\355~\274u\245\034=9\231\233\274\311\242A<{\366\010<j\254z\275\342\216\215<\202\3063\274P\213\274\274\257R\r\275sv(\273\234\211F\274\3217S=\354\"X\275\241\211.;\363jr<\207\251-=\210S\326\273\001|3\274\010@\\\274\36109\273.fF=P\000!=\270@F\275\r\031G=\343\221\345;q\020\302=)\267|\275\247\320\226<j+\236<\206\211J=\265\272|=\023\345\335<\304\357\t\275\313\241>\275\266\t\213<*$\002=\226\035P<\351\3358\274\212\376F\274\022\371\257<]\037\0078s\322#\275/\354\355<\031\257\\\274\010\021\313\274N]\365\274\364\360\274\274\352?\222\273? 
x\274lE7<\242\361\014\2749\020\0239\\\373\035=,\223\003\275\244A\213<\004\355\373\274x\016\010\275?\023\020\274\305~n\274cI\215<*zA\274\007\177\304\273i\340\321\273h\022\256;\306&<<XG,<A\234\025\274\315[\340<\002\363\204\274f\016\242\2741Vi;\360<\035<j`O=pQ\335<\3142\314<\244\245\320\274\207\230\247<\356\312#=bW\264<\200#\223\274\005e\356:\302\355M\274\025Q\203\274\n\243Z\274\016fj\274q\232#\275#S\274<\321_\025\273g\301\353\272:\276\010\274+\217\335\273}\350\216\274i\340\262\273M\3269\275 $m<\202\251\240\273\236\0033\275\352\037*<\216\323\343<6t\327;1Jm\275\257\213j\270\272h\325\272\267\264\232:\220\362\374\274\212\236\350<#\336\271;\337\212,\275\234q\274<\311\245\260<\224\177\033</\003\265\274U\371\006=2\033\272<v\321\200\274\364\372\301:\342\262\351\274\216?\021\274\345\2540<\256P\332<\366\006\302\273x\225:\273\220\010\322\274\371\023\265<\323o\344;\267\177\205\275VI\342\274\243_/\274\332#\177\274\321V\331<\311\254w\274A\027.<\310,\233\274\251zP<\254\"\225<\032\024\341\274\367\337N;\351\264\027\274UD\032=5\260\247;\323z\311\274[N\246\273x[\016;N{p9\370\233\303\274$\010,\275\014\361W;\234\265)\275\010\324$<\003\232Y\2744\362R:\007\237\030<!\3256<\273\321\371<\246y\233\274\244\273\303\272\354\325\000\275u`P=\224,\270\274J\273C<\343R\275<\376\330\367<\322\237\327<;q\212<\016Z\233\275\344\207\244\274\357\371\372\2742\0173=$\305\374\274\003\2018\274p\312\025=\303i\202<\244@\332<54\375<K\3157;\216M\r;-\322\255;\260I\314;\3423\033\273l\347Q\274\370\201\266;5\367\372\274(\364\0149Ih\366\274-\271\212<\203\250p<`\211)<\250\247&<x\010\215<W|\225\274{\027o\274\0179\260\274\267\206\031\275\310\346\207<\026\"Y\274\321\021\303\274\\\275\301\274\304\334@\274\372{\352<\337\035\004=\247\210\373\273\257WY\274\371\340y<\325iE\275\376\264X\272 \024\257<-]\230<\371\025\242\274F7Z\275\360\217\226<\r\314\265<}<;=7l\207\274\307\246V=-\352:\274\363p\232\274\316qk<\243k\027=R\344!\274\306\332\021;\350D\033=\361\275|\275\301\247\352=yYi\274\226Fh\272\312\271\347\273$\313><\212\033^=9e,\273\2378\020;\032\250\234;\324\240m\274\034\3061\275\270?\271\273\257\300M\273,\253a<\246\253\231\274\212P\200;\310\222y\275\206\267\200\274\242\226\021\274-\205\236:\004\246o\274\363t\205\274u\202\020=\274\025\322\274\272`\344<l::\274\221\336\254\274\264\036{<\350\345\333\273\340pX\274\332\344\276\274\201\027-<`\251\224<\"\304\264\2741\277\360\273<\t\024;\237&\224\273\366\253\350<vT\2727p\031\305\274\000\334\034\271\217L$<\212\234/=q\033\315\273R\260G\274\034J2=\213q\306<\274\0267<\310\345W\274\017\334j\273\327\224;<\3159\260\274\250\036A\274,\341\3127g\346\363:\360\263\352\274f=\2437\352\250\035<k\340^\275*.S<\026\327\251\273\323v\325:g\332\223\274\230\305\034=\274\375)\275P\242+\274K1\224<\202^m<\304\251\360\274M<\024<\037=\\\275\263\247\336\274\372\250\252<\255\2203=\023\007\r<l\273%\2750\332\032=-\3207\275\355\001)<*Ib\275\234o\224<\275\2033\275\260\267D\274\242\006\253\274Rw\340\274\325\340\263\274\227}=\275I|\007=\346\224\255<\224\001\263<\317\336\325<\036\226\r<b\\\270\273h\213G<\222\231\027=X\256\220\274.\017\273<\002&\022\274\220p\014\273$\334\211;\263\024!\273\212\033}<\033>#<\207\224\004\275;N\316<\220^\203\274\237\002#\274\347\226\302<\263%\214\273^\010S\273\010\352z\274\375\025L;7a!=\370\257\225\274R\375}\274=\241\217<\215\317\002\274\2675\224\273+t\250\274%\377l\274\206V&\275\217g\357\273\341\354\225<\204\032\013=\304\211\271;\226\331&<\312\006g\274\214\303!\273\034Ub<\223\030\236\273\001^\312\273\255\003\327;\303\000\370\273\222\003\260;o\016\302\273\347\376\236\274\242\244 
=\272\252\014\274cM\307\274\"\215\022\275\335o\315\274\304\327L\274\240o\037=3*\356\273\303\227G=\251=\342;g^\036\274:@\231\274\311\000Y<+\255\026<\236~\224<a\252G;\377j\241\273M;+\273Y7O\274p<l=/.\233\274\243\316J<\273\304\237\274\270\364\022\275\206\260;<\336B7=\035\014\276\274\346\302S\273\313`\025\275O(\020=nac\275\002\252\023\275u\331o<\354\257t<\2009\005=\334\027\266<b~d<\260#\260\273\000\375\223\274~\3059<L\245\203<\340\375\257<\263\331\220\275B\353\211\274\376\377\255<\231\3405<\265].=\352\241P<4\366u\275\246\023f<\370f\273\274\206j*=i\332\372\274T\305\332<}VP=\3076\277<\275\303\255<\370\"\216\273\202\305A\274ohX<>\236\034\275\330N9=v,\014=\232\234\326\274m\t\371<\345\320\215\275\361j\236<\277\221\222:\265\354==\222\372\304\2742\341~\273;\322Z=\010\235\301<\224\232\206\275\004\037\251\273MX$=\027Dv=\030\315U=xZ\026\275\256S\234;\371C\004\274\275\376\005\275O\220\327<Y\214K=\000[\217\274$\037\032=B\366\353\273\332% \275\263\000&\274\232Go\274\204U\014\275\031e\323<\224\177\230\275\010h$\275\030\210\024\275\222\340\333\274\240\212)=\322\000\351;\341\323(<\217\310L\275\314MG\274_\313L<a\014\354<`\2760<4\200.<e\217\377\274o\005k\275\340\021\234\274\241\360 <\177\006\254\274gob\275\254SM\272\\\234\033<=F\204<\361\342=\274\001`\002;\315A\026<\334VV;\206C\302;\265\271\026=4F8\275\365\334\014=\006\200O<w\001\r\275.\373 \275n\320m=\267\212p\274\002\315\r\275\033\364\207<Z\201\'<\215\300\232<\231/5=\311\354u\274\n\007{\274\023\370\351\274\254\2434=\231:\372\274\324a0\272\"\374#\274l1\177=P\000\212\274\320\0225\274\242\310]=\001]%<\315Y\253=\332\312\351\2738\365`\275|\341\357<$\340\374\274j\305i<Q\331\207\274\207\342-<\020\232W=f>\265\273\017\206\221\274\300\345\316;E\004\326\2741w\366<\000f\016\275k\'s\275\034\262\222\267\034\216\007\275E\342\327\273\342\277\340<\262\025\345<\313\377\232\273<\333\033=h,\323<\246\351I:=\017 \2745\366\026=\352mI\274\222\205\347<\267\006\306<\332\300o\275\\L!\275\014\rK<\246\245\\<\304\263\330\274\351\372e\274\226\342o\273\"\312\264\274\230%\204\2755\341$\275\355\360\211;\344\024X\274A\2018\274\371\375\t:\247?L;\334~=;\367N\275\274\026\334\205<\317\3428\2748\260\t\274Wl\337\273\325\004\233\274\232\020]\2752\025\"\273O\265\301\2744\223\234<\316|\266:\227RC<L\366\301;\331\360\330;\204\211\020\275\351s\343<u&,<\333s\003\275\351\226\244=\005a\347\274\330^\376\274u\265\032=J\326=\275\3518\010;\204\232o\273;r\315\274G?\024=zL\267\273\260\202R\27592S=\235\246\017:\246f!\274!%\251\2743\370\210\273\306A \274M\2075<\245\247;;\217\020v\274z1p;\227\351{\274x\374W</NZ<\322\242u9\2429C\274G\023\314\274\031\241[<\247r><.\024X=\204{1\274u\363.\274\302tX=\361:\344\274\271\321\200<J\232\314\274RO\327;?B\372\274\000n\235=\035\360b\274\265\220\014\274\336\250\336\274\002L\316\275\r\222\373\274\001u:\275\311\375\340\274\220&\264;\036\240\277;a\232\002=e2O=\350\206\037\273\215E\250;8\327\325\274\357\036\013=\035\035\343\271\364*\026=\217\330\257\274-\227\347;\273\247l\275\317\334\343:\323X\231<\250\250\330< 
,\027=j\324\034\274\005[6<\314\005\004\275=?\352<L\353\236\274q\234\214\274\211G&\2741\275\324<D\010\311<}\326\372<\302\200\260\274\t\004\035\275\230\217\035\2743\352\221\274n\"\372;-\363\371\273\210\303\014\275(-\202\273Z\242\2149\257W\223<\370\035\020\275\315\341k\274\206\366\264;\3112\320\272q?l\273\275\006^\273x\332z\274\322\217\331\274\270\033\010\275\364\332\022\275\037Xo<\356Mc\274\034\360G;\314^\237;\220\363\"<3\027\301<\030?!\275\314\350\240\274\203l\201\274\3768\267<0^\212=b\212\t\275\376\322\027\274h\017\026\275C\022P\274(4\014=\262\t\215=\367y\304\275\303\353#\273\316q\212\274\270R\215=j\310\004\275=U@\274W\307\270\274\361<\241\274l-\010<xC\345\274-\320\221\274\317)\303\274c\267@=\251\244\026=\320\354\311\274\222\021\313\273$\036Q\273\034\304\256<\nA\206<\226E\316\273\275\312\301\272\222\215\355;\233Q\240\273\220\233\341<\270\021\264<M \257;\221\333Z<\017\251n;\205\212\023\274\302~\270\267\213\363\331\274~\302!<\247/\374\2740+;<\300\025\024=\316\003\036<!\032\017\274\327[l\274\350m\251\274\300\033\004=\273b\225<<V\233<\245!\211\275\266\235u\274\235W\375;\345\224j;\264\022$;\350JH<\266\0247\275\370\262\240\274\224I\025<[\264\325<\230%\265;\357\354\256\273\340\033\031\275sTl<\241\351\034\275\246\021\006;\230\274\"<a\323\020\274`\316\304;\202D\340<Y\322\013=\313r~<\264S\010\275Y~\037\275\235\001\017\275\275\2548<]\331\034\274\244\332\001=$.b<^Z\020=&K\277\275\375\035\332<6+\205=\303\237\277;\337\021\257\275\300\r$\274g=\317\274]\006C<wW\200<\274\"\203\273\3615\216\275\344\371,\274f\0075\275\316[5<|=9;P)D=0>\306\274\033;\237:\315z\324\273\367\246V\274\0256\227\275\265#e=\323\2675\275s_c<g\203\202<\246}?\275\341\307w=\373\345\274\2759\340\246\274\276\233\025=\005f\211\274lZ\323\274\035\r\246<\001\234\003\274\373\n\002\275\001\363:\273\240Xm\273|.[\274\250\324\232\274\2239_;\316\3734;\306\032\212\274\347\244\325<bQ\305\274y@\010=\206\2313<\310\031\375\274\363\235\374;O\302\304;\205w\372\271\255\346,\275\t\036\210\272\277\267\004=&\014\325;\027u\014\272\215D\212<\242\242,\275J\033\321<\314\027)\274\325\306<=f\267\324\273B\352\300\274&\210\004\275\321du\275|\350\014=\317\255\017\275zy\010\275\272\233y\274\201F\212\274\033\225I:~dI\272\340?J\274\325\316\002\275\370\207\371\273\303\377\204;\017e*\274\016(\r=\030\232\230\273\225]0;X0\225\274>t\177:\235H\006;Gr\372<\013;\005\275R\345\234\274\340\266\345<P>z\273N\201\314\273\021\307e<\256\024\326<\214\2145<\033\002\372\274\366\224\232<\230\275W<\302\034O;\036\245\027\274=)\203=\214k9;\2657\343;\310-\255<\310\267\244\274W\231\222<\036#\264<\377\344\025;}\002?<\204\323r=\340H\203;\332D\371\274\240\033\366\274\024\374\372<\256\240\027\275&2\213\273\032H\217\274\304\206\024\275\252\364\257<\234\276$<\203\001\033=\t\316\321<bB\373\274<=\326\274{\026A\2754\254\222\272\321&\037\274\024/\267<\331#\316<^\335.<\243\336B=x\363\331\273\316\346\\\273\250=\343\273[\201\227\275e\253\367:xD\224;\034\017\252\274{#\331<i=S=\343\354\236\274\256\314\253:\202\323f<\320! 
\275\304\211A\274\226\003\004<v\310Q\275m\226\201<a\264n\272!v\317\270P\267\361\274Td\254\274\264\322==\362\022\325\274\301\243o<\363\340\331<\305Vt\274\032\\\255<C~.<rv5=\251rt\273\013{\202\275\035\307\022<\315G\014=f\206H\273\247\320\334:\004F\242<\202\266\020\274\241\346\272;Dp\270\274F\360\033\274\276\377q9\027\201\260:\231|P<K\0177<\'<\232\274\325\212m<\271p\026<C\214g\274\303,\201<\256\203\034<)O\220\274\r\215(\274\2729\236<R\257\315\274\034\332\377<\373I\210\274\2210\236<7\306E\274\010<\220\273\273Y\333<\356\373\014<\246\317\302<\325Y=<2@\007<x\274\200\274?\324\007\275\227\252\236<`\200\000\275\364\3111:\033\245\026\275\302C\226;\374\345\322<\362\306W\274\323\361\"\275\313bs\274\262(\344:\000a+\274p\016\331\273\031\336\333;\331\346\";\014\337\312\274\313\366\206;\355W\0059\331J\257<~\205/\275\337\303\025;\332\253\317<^\340\034< \025\341;BqL\275\273?\030\275oV\374\274\3139}<6\242\211\275\274\037\335\274=\250\210\273\026\022A\275\327\017\235<\3500%=\320\315\020\275\306\300R\274P\010e\274y\252\272\274\361\235\004<\367K\372<I\273\343<\212pq<\035\304^<T\002\n\275\373\203\007=\316\343\224\273\265\022 \274k\215\327;\237p\204<\357\224\305<\244\355\013\275\342\307\005=*\236\267\273\254\315\370\274\354\205h\275\334\217P=\031E\310\275\2333\241;\354{$<\241du=>)\306\273\263\247\033=?,\342<\013\001\023\275\004\212\305\274\262\206\030\274(H\305;\3346}<X+\025\27590\365<\310\266\006\274\246\022n\274~\324i<u=o=\207\235h<\326\250\005\273\311\001\005<\363#\370<9@\261\274\230\004\232\273\3033\251\273\325%!\273\303yF=\261{\016\275v\3445=\022\276#:bq\221<@\254y\2727\036\177\275\250\341\374;\'\177!\274)tA<\320|m\275{\374\n=\267i\242\273\021wn=\356\376\002\275G\217\303\272\233\343\362;\331\362M=E?m\275:\003\255\2747V\020\274<\'_\274^\263\231=*~7=\032\334\303\275\205\254\240<\335\303\2709+:\341<)\3120\275\202M\223<\0011\2179\024@\262<w\215\315<oNM9\032[\211;T+\263;Q\227\311\274\242 
\330<\256\t\235<\261a\267;\375\210\205\274\021\307\325\2754y\271\273\305\324\312\272t\235\226<#\026\354\274\374\275\315\274\n%\332\274\327\226A\274\302\227;<\202\215y:\"\270\221:\247\221\352;\251\266q;O\\\246<\255I\335\274\"\206\240:}\324\232\274\007\324\020\275\2044+\274\203I\021\275\302H\241;<\341\211\274\217)(\273\372\213\220\274\225\265\005;\254\373$<\310o\370;\313N\024\274\277B\302<\343k\030\275\226\373/\274<\256\3329q\326\327\273\034\256\023=\375\211H:\262!(<~\026\'\274\265p\346<\220\373\\<f\325\233<\312\277\016\275\363i\336\273q(m\274\261\243\"\275l\215\321\272\326<\235:@I3\275\321\2673<h\273\227;\240\371\202<\200\237\315\273\246\273\360:V\237v\274\320B=;x7\333\274\024\266s<\261\261\014\2742s\316\274\322\330\345<\230\307n=\372\307W;H\374\231\2757\022`;\312\320\236\274\033\037\031\2741A\321\274\002\376\345<X-\021;\241LL\274\373\301\370<5J~<n\r0=\261\343>\275Ju!<\323\354\177\273^9\352\274\340\216\367\273~_}\274of\262<\372\212\222\274\375JC<A\002\"<\263\\\272\274\377$\311\274\032\017\324<\234\212\333\273\355\035Z\275\244f\031\275\342\030\021\274\227!\256\274\261\312T=r\200\261<N\377}\273\220LE\275\241\251\r=\371\354g<\024\245\371\274S\325\016\273\343\342\270\274\303+\261<&/\334\273\037\017\006\275\375\221\314\274\277]l\275\355\032\367\273=\214\300\274\374\305\310\274\243\022D\274.\322\030\275\246\021\207\274\317\354N<F\230\257:CPS\274\000\377\251<\241K\007=\310\207\315\273E\252\364\271BL\347\2742f\204=?j\350\274k:\261\272\256D8=\325\0015=.a\034=\022\270)=\202\356\257\275\276\334\000\274\303\314M\275\033+\255=F\333v\275}\213W\274\234\204r=\240\277\022<\034\3114=\271\362\000=\350\367g;\222\373\267;\262\206\303;\340\025\030;t\316\341:H8\224:1\212\311<\343\026R\274\260Z\350<\021G|\274fO\211<\000\265U;\323\333?\273\317Jn<\024\266\203<\371vK\274\313V\261\274\205O\353\273\3523\025\275\360\037\217<U`\274\274\211\265i\274\033\243\006\275\n\027\201;c\177\226<\266\342\236<\243s\265\274u\227\213\274\316\265\022=\247n/\275\270O\327\270@\265\216<\232\023\271;\031O*:Ds\213\275\2757L\272o&G\274k\230\302<\231=#\272\362/\326<`\242\016\275%1\263\274\321\320\235<6\016\016=\257\004\023\275\202\352n<<A\037=\362\307o\275\360b\233=\216\315\242\274V1\315\272\211\331\237\273\221\210p<\222\253\372:W\225\234\274\224\313}\273_\032\375\272qH\034\275\232a%\275\031\330\007=\007\247\226;\365\237\r<}\326\373\274M\246\325\273$T\013\275]\215E\273\215\014\200\274\233\302x\273\321\222q\274\205\254\271\274(\232\261<J\264\342\274\312oZ\273\003\274\245;\276\237\344\274\324\016g<K\221\216\274\256\377k\274f\262\310\274\322M\004<\006Y\372;\326\361\241\274\t\351\230;\366\n\340;\350\253\376;D\223\252<\273\325\234\274\254\020E\274B\336\221<\334\272\361\272\332\233\302<\271\025\315\274\353\254\r\275\233t)=A<\246<q\213$<\316.\373<\232\026\250\274\210\362\225<y<\350\274\"^:9\370K`\274\021\246\222\274\000t\204\274\332\323\206\272\340\245O<\226\351H\275I\206y;]\225\007\273\010\314\210;\223(\020\275\214!\274;b\327F\275\003\010\276:\006JB\274\3234\004=?v\n\275\300\265T\273\243\362\301\273\232L\363\274\350\032\016<\245,\220<\007jq\274P\210\212\275\204\n\006==\366\375\274\\\201D;<xH\275\224\253I;R}\005\275\357\274\217\274\245\211\373\274\374IB\274\220\245\371\273\336\342f\275\337\301\245<\326\274\221\273\023\2654\273x\306\357<T\326\013\274Ta\264\273t\346\260:\244\312\314<\327\373k\274%\211\314<\020\025\036\275\251\311\377\273\231.K<9\345\365<Bs\224<W_\325\272\323\233\223\273\366\004\364;\0038\221\274\3126\205\274\335Y>\273\025=\010<\222g\035\2745\322\265\274\326_\242<\021\017\t=\035\315\241\273\342\262\375\274*3\000\274\320\331\r\274\333\261
\351\273\212\031$;z\226\315\273\210X#\275\207\024Y\273\215\305\265<\022\236\030<_q\206<\020\220\220<\200\326;;\022\306g<P\376\013\273\317\344\233\274,\017%\274\326\325\230<\267\023\211\273\331\242-\274g{\251\274Q$0\274F\t#=\024\250N\274\014\347w\274\233B\216\274xf\264\274V/\236\2735\302D=}6\034\274?\304#=t3\255\274O\321\035;A^\236\272\352\375\250<\213a\000;\240;\204\272\311~\016=\342\022\003;y\310:\274\304\210\242<\217\310A=\265q\346\274\001\241\006<o\r\305\2749TA\275\316\243\312\273\330F\177=\007\316\r;Z\245\305<\177u\261\273\031?\005<$\304\350\274\253\020\353\274\213\237\205\274:~\235<Wd\177\274\251\327\215<\275\023\223\273\034\230\014\272305\275\221\242\220\273\360\256*=*#\270<\235\315\237\275\006\263)\274o\307<;\347\301\006<\000\033s=80\326<O+\205\275\225UU<R\307\263\274\241\305\017<\363\010\026\274m\321\360<\244\\#=\027\244\331<2u6\273\366\017\250\273*\247\002<[\3633=\026D~\275\272n\371;\365\274\254<\310\256\352\273\314\005\226<lt\243\275\005\022s<\331\342{<P7\2059.K\245\272\330l \274}\237\232=s\202\240=(\t\342\274\227If\275\320\327\247\274\030f\235=/\375 =\220\236\037=\330Q\267;bQ\006=\352\3666\274\313k\207<\305\024\326<V&\t\274G\313\t=@\035V\274\256\031\225\274F\003\016\275\274\366\376;\275\0008\275Pg!=\371\332&\275\212\247\305\274kQ\251\274\332\363|\271f\274\215<R\371\006\275\366\350\233<\207r\025\274\347\272X\274\330\203\210;\360j?\274c\021\220<\362\212A\275\026\267\200\274\214\265\326\274X9\246;\354\000e<\357\271\027\274=\027\240\275 \204\322:\025K3<.\030\032\273\277\014(\274\255\244\231<\346\250)<\224\271\351<a\303\230<\247\362*=\254\230\363\274\245\251\257;\364,\322;`\355>;\264(\250\274\353\256b=-;|<\233DZ\2740L\336\274\350c\331<\373}\204;\277\017\367<\323\311-\274:\344W\274\274\365\243\274:\377\221<\343sy=*\177\204<\312\2677\275\0373\022=\363}\203\275\032\346\033\275\351\307l<\317\2742=\312\nH=\366\273\340</\034%\275_\271\362:P\322\261\274\227vn<oz\031\275\247\215{\2743Nu<q,\004\275w\264\226\274\316\2510<!\\\026\275|\025\326\274\370\226G\275\373\267\215\275U\353\200<\326\223\222\2754z-; \224\027<\213m\007\273\332F-\274\322yQ=\225\333\024=\223\3648<,\305~:=\306\307<\272\307F\270\210R\276<+\235\351<\260pj\275\324\302\024\275\274j\213;\353\371\274<\304\225T\274\360\323\202\273\361\372\177:\320c\021\274\2104d\275\217\333\367\274:<\3139\345\333\206\274uH\267\274\035/n;\0029\253<\237\205\327\273\034\002\203\274W|7<\316+ \274\356\227\232:\022\212\253:\337D\270\274R\216<\275\346\322\\\2743\351\030\274\310\025\341\273\261\001[=4\010)\272\234\253\204\274\'\361R<\212\206f\275\231x0\274a\352\316;\375\234\3779ulR=\206\031\254\274\346\003\360\273\215\321\013=\231\304\321\274\260\027n<\006g\204\271 \331\316\274k\257E<\020\223=<j\034;\275\245\331\020=\002S\243\274\212\001\323\274\310\254s\274\257=2\274\177Su\273\355\3069<\201\177)<\337w\343\273\032\250r\274-\267\223\274P\233\253<\026\251\312\273%\352\242<\314\270\225\274\303\001\000\275O\022\017=X\324\315;\025F\223=\337j2\273z\036]<S\366\023=\271\262\\\274\310\004\237;\367\033\332\274\361\264S<Y\312\230\274d\260\020=!\'\003\274-\021\017<v\335\256\274a\035\313\275Y\310\230\275\013\301\025\275\210\031\245; 
I\204\273%Z\364\273\272\216G=Xk,={\270Z\274q@\216<{p\006\275\266\215\332<m\352\0318v\010}<?4\010\275Jt\\<C\311E\275H\342\271<\273\030F;\213\311\216<v!\020=\031\342\212:n\n\301\273\231+\200\274?\223\236<\333W\245\274\253\312\305\274\231\264\242\273\204t\211<\364&/=\024\257\226<\322\202\202\274\033\303\\\275\301!\031\274\242\300^;\236\337\241<\373\261\210\274\227\334\305\274_n\2409\023\355\020;\200\221N<e\313\023\275~\307\373\274Y\352t\274\215a\235\274\360\336t\273\325\033D\273\177\311\267\274\\\317\253\274\204y\231\274,\221\005\275~p\364<\312\204\010\275\216\276\322;\330\370\022\274|Y\007\274\251\250\234<g\017\315\274\212\231\337\274\230\'3\274W[\311<\304\241\200=\313$\327\274\030)\356:\374\204Z\275E\303\371\271\316\341\005=R\024v=Xk\264\275\300\263\000\274\030\247\355\274\243(\r=\245\"@\2750\247\261\274\216\r\242\274\204\300\311\273\353)a<py\010\275R?@\274r\272\211\2746\037\306<\216\375\036=Y\223\310\274~5\000<0Q\020\274\023r\323;\336d\204<\223\376\225\272Q_x;+|\217<&\333N:\026Q\301<\226\3224=\245,\235;\270\\~<\206`a< b\034;a1\256\272\300\326\324\274v\225\227;\370#\003\275\005\340\010=\260F\224<a?^\274F\311\177\274\347\200\273\274\227\211\301\274\356\213\342<\t\252\025<\"\177\177\273\265>X\275KNk\274\0376~<)\\\002\274\033\312\013<\306\342\242<^\214\034\275$\347\230\274\260\335\373;y\254\'<\320\025\222\273=\022\217;L\355M\2753\rP<\254\372\016\275\331Z\"\274\373U\237;?(\033<\212GT\273X`A=\214\367?=\213\021L<Q\246\010\275\343\237\233\274o{\362\274\250\370\243\274\330[\031\274 \002=<E\220\003;E\206\265\274W.\234\275gxM;\312\254\030=Y\230\027=\316\004\224\275X\213:<\360O\274\274\036\305\301\274\020\256\220\273\243H\014<\024\272\234\275\032\257\313<\033\337\030<-\330\306:\212i\365;\347\315Q=\354\177\032\275\262[\007;\3477(\275JS\235\274\023\r\212\275\210\031t=\026\316\212\275\356.C\275\251\343\212:\\yA\275[\323\244=\236\331\020\275g\2268;\236ZV=.\021\22179$\267\274\302\260\204;\261n1\274\216\002\202\274\334\021-\274\235\002\213\274\203\032\n<me\320\274\340\321\243<~\016b\274\302\251\355\274\256\251\213<\322g\250\274VG\010=k\345X<\341z,\2758\027;;\274:|<M7\r\274y\244\007\275E!i<\355\n\230<n\246\204\272\206\n\205:\302\001\031=\006\331\007\275.]\027=\324\217\202\274\351\022\026<\276q\253\274\224w\312\274\246/5\275\202\352p\275\032\336\370<J`\256\274\013\302\371\274\357\252\343;\325\032\261\274z5\3059\307\222\234<HN\\\274\241\274&\275\027_9\273\270\366F\274Q\267\252\274]C\263<\270\303\374;\361>8\274F\002\023\275\245\304\315\274\264u\253\272\347\326\022=\rF\225\273O\234\327\274\302\001\245;\014W\260\273g\265\315:\244\274\211;\206X\006=(\305B;u\366\001\275\220Q|<\220\347\032\274)\276\236;-%\277:\312C[=;\026\314\274\221|\253\273\3056[<\303\271L\274X+\353<\376\030\301<\351\037\235<\237\207\274\273B\'l=\322\331u<\270\207\254\274\3166A\2742\310\035<\330\0042\275\357\364\271;\020\236%\274\377{\026\275/\316\203<\t\177\240<\223\352\217=\236\331\004;\334\344h\274\334*\352\273\236\244y\275\227\016G;\314R\2539@\256 
=\362N&;9\370\200<\216o/<^\240\212\274\263;w\274D\303\3459\026\234G\275\357\253\341\273\325\377\023\274\344$\251;\244u\277<\034\370g=m\205\331\274F\263^\273\226\0367<\006r\262\274T\271}\274v,u<\272\276u\275.\ta<\215\014\322<b\376\252\273\271\313\036\275\031\352p<H~\005=[\216\002\275#@\000=\331\033\311<\024i\300\273\022\362\335<lJ\323:\023\365\373<{1\210\274\231\274\224\275\363d\253;\226w\016=+\307r;\000\215\313\273O\327\337<\235\026\030\2747\017\313<\000\304D\274w4\260<s\"\016<\307Bq\274\032\234\275<&RY;=z\227;\021|\264<\226\000\256:\347`\216\274b\346\205<\276\331\243<\036\300\335\273\251\277\257\274|\353M<\266\250\201\274Y\373\003=e\345\201\274\220\233^<Fmi\274\256NA<\230#\n=\324\026\201<\226\3743\274\206\242\273\272\261\327K<v\235\353\274\t\n \275Ip\026<`(\226\274\205\270\t<\333I9\275:RZ<\220\020\3348\225\300\251\2733\276\025\2753w\257\274\177\007G\274E#\004\275\373>\322;\027\310\207<I\3552<LP\235\274\313\207\347:*\326\r\274GyG<\006\267\212\274\301F\201\272s\\R<\271\2008<\232\253\316:VTT\275|\225\005\275N\362\272\274\305Y\321;\"!k\275z\336\304:\332\223d\274\213\376s\275\315\030\272\274\245\276P:\347\3249\275:k\377\274\260>\223\274\032\\\003<\000g\253\273\372\302\027=\274\2667;)\257\031<\336\231\016;&\356\2728\3133\307<4\303\205\274\312\217\334\273\352\314}<nE\213;i\237\315<h\035\261\274Z\370\217=\274F\372\274p\244\301\274\235\016\250\274e\374t=\233[\275\275y\005~<\266\014\002\274\312\316\327<\\\352\236\2740&\030=@\004\010<y\320U\275q\302\235\274#\255\214\274\315\034l;\217:\363<\223\202\225\275 \315\r=\306\246\250\274!\257\371\273\354L\215<}e\306<J\014\235\274u\303\340\273\247J\001<h\360A=\245\037\003\274m\275\033;\370\234\n<v\303\251;\306w\021=0\310\346\274\301 \016=5\214^<\375 W\273MZ[\274\355r\212\275\030.\221;\333\337,\274\243hP\274\271\nA\275\004\366>=Iy\363\274\356\026\031=\233\2035\275\270,\037\275Q\024\367\274N\353\264=t.\\\275\244\2341\275\037T\255\274v.\021\275;\231_=q\310\202=\361\340\251\275\255\232\306<\234\323\013;\303i<=\361\252\217\274\342\255\322<z\311\240\274\017_X<sxd<!J\250\273\3153\261\274\"\276\372\274CM\021\275\334\251\227\274\363\206#\274\004\021\226<S\361\256\272\035T\201\275\302H\020\274\315S!\274\327>\r\274s><\275\247{\266\274\342\006\221\274\030\335\366:\270\357\365<\224\326*:\212\014\255\274?*\002=1\227\360:\025\220/\2735x\232\274\243\230;\272\002pZ\274:\034(\275X\006G\274+\347\034\275ml\306\272\275\035\226\274\365G3\274\026\303\302\274#\370\203<\023\326o9\315\251\006\273\235\033O\274\351\210\331<2\264\010\275J\006w9\224\224\034;p\332\225\273:u\330<\177\026\202\273\246\267F<\312.\217\274\271\031K<\352\013\311\274\340\355\031=vxY\275I%.\274\204\324\220\274\\(\031\275k#z<\205\275.;\203\017=\275@Y\2507\234\2034\271\364\306w<\231\300\333;7U\233<\241\235W\274\"\204E<\330&\270\274\2751<<\035\240\332:&\255\210\274\251\020\034=\223O\222=\024\235\213\273\371\304\265\275\321\216\310<\302\247\216\274\317z_\275\243-\266\274\3157\035<X+\204\274\217B\274:\364E\321<\317\177\224\273\216\276w=sW+\275\246Y\221;\216oy\274\355\274Y\272\254\035\n\273\213e\357\274~P\325;\222{\001\275P!\200<\013A\236;\"7]\2740%\316\274>^m<m\274\236:\361\325;\275\371Um\275r28< \236\n\274Z\257L=\231p|<\027-\235\274\353\006_\275+^\177=d\320\377<\206d\266\2747\275\334\274\\\332+\275\275\376\341<\225)8\274<\303&\275}\332\030\275\033\0223\275y\034\330\274A\305\023\275\324\n\232\274*\254\237\274\215!\257\274S\034\032\275\301O\313<5T\314\273\347C\246\274?\242\223<h\321 = 
\314\210<;\2025\272!m\005\275\032l#=\230\350\332\274n~\352\273\030ET=\3168\341<\333\363*=\361\336\210=2\260\254\275xJ\217\272\327\033Z\275\364\2273=\314\215\207\275H~\004\274\034\230G=\261\034q;\027F<=\370i\205<\322\200\307\273\032\022\216;X\326<<\257@u\274{\343\243\273\267)\366<Z\340/<\252U\253\274\304\357\r==A\237\2734\307\376<b/\313;\217O\265\273P\334\201<e\302\006<\374\311F\274\005\320\003\275\214\325G<\307h\026\275ug\217<\272\214q\274\2449Z\274\350\310\036\275j8Y<\\\267\221;\251cS\273\021n$\275\375\271Z;\270\"\235<m\360@\274\261\2639\274\341u9<>\243\000<<q~<[\257\236\275\004\253\340\274=\353\242\274\322\224S;\000X\355;5\214\255<\361W)\275\372\020\255\274\273\343\355<\314ST=\275\242J\275&/\337<U7b=;l\\\275\024\344\033=\345\032\35194\025U\272\342\3452<\353!,\272i\025>\27539\302\274\261\252`\2749m\027\274^\270\002\275\316\314\314\274\3431>=D\036\014=\345ch\273\256U\037\275\353L\177\274\022\234\217\274~\3573<q\317\231\274\300\264s<\206L}\274\007\251\216\274\362)\034:\034\330)\275\355H\226\274x \376\272h\374\365\274\362\305\245<>0\234\274<Xh\274\347\341\202\274\311\346f;\246\022N;!\264\371:T!j<\343\016\237\273\307a[<`6\346;\304\022\213\274\3428\372\273\023\030\334;R\341g\273.\300\357;\215\214\274\274\306\366\034\275\005qY=VA|;\353_\310;\037\331\302<27l\274\n&\355<\244u\226\274\2432M<h\366]\274\005\200\263\274\342\300];\303\rU\273\032\303\263<\3153\213\274JVK\274\270\233\230\273\262Q\200<\320\212\327\274\271\026\021<\335BD\275\335)\247<\034\234\317\274\370p\031=\215\230\212\274\357)\257;\306\022L\274\347\305M\274\341%J<\240\264\035=\203\233\273\274z\264U\275\266)D<\274\256\263\274\327\230 \274\356\350\024\275\206\237=<\007\221\311\273\3044\202\274;\336!\275w\3601<I[\241<\023@/\275\345\2372\274\035\322\222\274\031\241\367\274\347ig<\250I\027\273\373\340k\274\337bS\274\000E\246\273\001\243\245\274\361\n\364<$m\033\275.B\356\273\013\237\3379\037\351\251<T\243\321;p\014\020\274\354\332\257;\255\353\303\274:\207\324\274\306g\235\274\002\023\200\274\332\272\320<\364\227\223\273X\251\021\275\322:1<\034\014\253<\006-\340<=\260\014\275\211\026\n\275\340e\034\274T\006\247<\032\306\315<\215\t\332\272\235\333-\275Ji^\272\345\256\010<\372\264\3159\357\322}<W\\\003<\n\017Y\274$\225\031<\361\273\037\274\2237\203\274.\246\231\274\243\351\332<\273\235\360\2721\017\256\274\016\314\256\274\313\200a\273b\210\206<uO\326\274\315\260T\274wM]<\227\367\321\2744\377\240\273Db+=\024[p\274\335\376\226<\020\374\225\274\237!\216\273\004\374\242<\024\024\004=\236:\255;K<\014\275\203\324\037<5\376\014=g\313\232<5\311F=\365\r\312<a\307\217\274\000\022?<\013\005\303\273\317^*\275\360b6\274\033\000F=\033\215\023=\200\300Z<\030\244d<\001\'\333;\314{]\275\316\330\216\273\300\310L\2741\306\224\273\371\237\324\274\362\026&=\252%\357\274P\317.\275b\027d\275\356x\177\274r+\027<UK\'=\276\310}\275\330+\311\274\253\305\001\274\325*\004\275\276\374\004=\\~?=\337\375\206\275\3721|\273\3060G\274JS-=H\036f<8\235\t=@-\253<\234\206Q<\316\372\017\274\3116\273\273\244i\304\274\233\360\335<*\371U\275\037\333P\274*)\364:a\213\263<HQ 
=\245\246\003\275\232\313\254;\344He\2734,!\274h\334\240\273nul\275!9z=7\021\364<\251|\263\273\250Wf\275\257zW\275\374=\005=\027#\240\273nd\357<;Y\362<\230\023w=\331;\271\274t+\001<\224\242\367;b\365\027<\234\205\003=\307+5\275\212.\216\274\216i\271\274#\2765=\347\335f\275\341\245\274;\250FR\275,DA\275\003\362\222\274\240\306\036<\020i\226\274\270B\270\273\201\311\326\273Z\0048=`\3278\274\351\370\006<\351\324\251\274\237\025\332;\226\213\026\275*\226\224\274\306\022\246\274\337\225B;\356u\263;\r\304\324;\027;f\275\335\315\202\273)\327\255;\235+\257\2746\n\223:3\006>=\371\010r<\340$C=\344!\001=\330\217\213<Sm\353\274L\340w<\261\346\3609\311\316\232<\277\346\034\274\350T-=\366\\\027=\201\275\352\2738\361\000\275\255D;\27407\220\2746z\202<\205\234\\\274j|\361\274lh2\275\240\026&\274\177\265\006=\301/\031= G\227\2755\237\236<\r\0255\275+=\360\2748\214\344\273\037\314\002=^\"v<[\027\354<P\\%\274r\032\336\273\244\376\245;\353u}<\034\006\037\275\341U\355\272\362\010\266\272\213\340M\274\330N\246\273B\260\304;\224:l\275\263\274k\275\242\365\205\275\230\021\321\274\032C,<\231\013\177\275\257l\026\275\016\023+=\216\212\016\275\237\3351\273+\271\313<\203e\251< \340\227<p\006\023<&r\n<\271\243{<\263e\216<\265m\317<\025\2531\275\2242\002\275u\306\254<t\'H<\240o56\r\230#<\026:\033<\356\037\255;\361\'G\275\377\010\367\274\240\315\245;\n\372\007\274\242B\313\274\333\025\267;\201\200\351<\353\021\234:\260pR\274\344H\263<T\036\236\273\363F\370\273\323z\t<\207Hr\274\023\374?\275b\251\375\273\225\306\216\274\302q\n\275\362\227\000=\372d\035\274\256\177\255\274\236@\334\271\323/9\275\023r\212\274W\305\217;\005\304\017<\247&\330<v\362\266\274o\004\311;\330\3272=\341*\'\272\207\nw<%\272\024\274\347\005\023\274I\2769\273Nz\310<\200W\265\274V\323\373<,]\032\275\326\347I\274\347\266\240\273i\013\3639[\245\243\272\326\003\352;\3373g\274%\027\027=\205\231\262\273\345\325\313\274==I=\0223S\274\324\254\271<\002\306\334\274\031t?\274g\233\000=\377\363\200<\376\377\242=\243e\205;\211e\025<lh\245<;\365%\274\002e\304;<\252\213\274\275#@<\265c;\274\r\006\224\272\326\210-<\251\205\256<sWn\274K\300\223\275\370`\265\275\341\014\265\274T?\206<\257\235\201\274\242\271\241\274\204\213,=*\377\307;\t\n\026\274\321\225\214<\226\223\016\274\340?\250:\036\362\264\272\373\212*\273x\027/\275-*\221<Q\334\025\275\004i,=\346\354{\274\341%\275<Zw\202<\343\3679;\'\245\215\274c\2554\274\374\325\216<\377\"u\274F\216\344\274\032\307\343\272S\032\001=\014\261E=q1F<\246p\372;\361\000f\275\360\037\275\274\365\275\251<o\202]<\226K\363\274P\215\350\274x\212\266\273\336i\236\272\330\213\330;\202\371\326\274\203\224\026\275r\306\237\274\246\377\257\274b\2220<u\303?=X^\207\274\262\007\206\274\244\022]9\247\346\371\274\010\203\006=.-\017\275\025z(<\025\0230\274\266?\272\273\213\256\225<\306\210\234\274\367\252\304\274Z,I\272\245\302s<{\200\036=\266\344@\274\242:\342<\233\215F\275\2154\243:\234\336\253<V6*=\270\200\227\275\016\246\001\275\036a\311\274C\365X\274T\206\026\275\274\320\255\274\2263\233\274\005\t\354\273l\252\030<\322\006\326\274\357\005\376:Bm6;\243\212\224<fV\031=\371l\363\274\336\321\304;\273\rI;\202\216\364\273\017\267\360<\320\362\350;\245\323X<\215fO\273(\311\200\272Ev\371<APo<\034\243\351\272\240,H\273\000\031:<\020\304\216\274\221<\316\274l\357\315\274\022\254Q<>)\301\274\026\204<<\363\372\342<M\301\322\274}@\205\273;\002\247;z\345\231\274\236\233\226<\002t\002;3\317+\274r\204\225\274\314\2500\274\211\272\241<\200\307\262\271\r\323\021<fFV<\3279\327\274MF\215\274S\014}<\177\340D;*\347G\273\032\276\021;\247\345\030\275\240n`<<\027\265\2
74@PX\274\330\305\024\274i\022\037=sO\213\274\320b\321<vBU=ZP\016<`\345\035\275D\224\246\274\334I\036\274\272\\\"\275;\3279:2t\210\273\032:\262\274J\366\004=\210\031s\275\237\224\255<\277\255\226\273\331\2305=\300\036\016\275\270/\302<\355\325\230;\263\334\010\275\224\005\024\274\240\375\352\273\320\375\225\275\317\250\262<!\254\023=rS.\274\243E\330<\347{6=\343\311%\272v!\"\274\364p\023\275\254\034H\273)i\203\275o\001\206=: \177\275]\302\305\274\342v\247<3\336\266\274\345\242\234=f\321\312<\302\217x<\226\223O=\014\0245=y\344\"\272P\217\324\273tEZ\273D\343\303<\207\207\223\274\320f\264\274\360\022\374<jU\304\274;\361\343;6\326\300\274\311\231\363\274\016G\246<d\222\212\274l\312\210<\241v\3169\013R\024\275\263/\215;Gu};\365F\031\273]C\230\274\311\037\027=\254\027\273\272\355\032\255\274\340\014\214\273X\271\374<Y\340\030\275;\371~<s\345K\274<\000\327\274a\274\300:\203S\235\274!\311>\275\005\232\201\275\303`\231<\214\201:<X\000\001\275\335N\363;:8\323\274X\232\017=@T\010=~\034\305\273\241\375\327\274(\244\307;0\267/\274\202\016\371\274\271\007H;\310\252=\274sfC\274\304^\026\275\242k\276\274\262\212\347\272hJ\036=\211Z0<1\242Q\273\272H\356\274cV\001\275s\233\346;H\235f\273M\032\257;U\2735\274\225v\036\274u]\253<\002P\367\272\236\355y<\273\222\220<G\005\244<\247\367\360\274\003\251(;$\005\247\274\212\305&;\233,\376<7d\353;\035\220v<4ET\274y~\004=@>\357<$\246\364\273\235\272\260:\335\225\334;\245\255\304\274\276\032x;\023\362i;\225/\326\274\270\3560<b\343\315<\016\260\212=F\210\277\274\276np<6h\264<\207\254\254\275\310\302\244;\277\334\223<\037\rz=\005f\372;5\326\247<\034M\232\274\\A\215\274\214\205\210\274\024ZC<E\310\320:;\026O\274>w\226\274\260\267\373<\260\230\337<\331[3=r\315\252\274\002\3070\274\255\350\250<\225\264/\274\">\232\274 .\364;?|v\275\270\222\016<\271\230\007=\220\2255\274\\G\016\275\224\203Q=\332R\272<\220J\021\275\267\n\013=\352\311%<@$\004\274\312>%<\007\213\202;\0149\'<\356\020\'\272xH\227\275\364\215\235\272]\214\003=\205\262\033<\017\204v<UG8=;<\336\273\341p\010=\363y\205\274\262e\335<z\225\021<\376\344\347\274\263B\210<\252\264g9\233b\254;\034\343\333<\206Y\317;\020\246\237\274U\'y<ri\227<\334\341\001;\025\000\372\274\010f\247<\211EB<l\216\341:\3711\220<\313\344\346<\035\355\373\274\014\033\265\272\014\311\014=\022\321\211<J\272\270\274\232\341~\274\017S6<\233p\305\274x\350x\274U\352.<\212\324\251\274\257\2247\273\370\210\246\274Xp\313<\352\367\204\274\345\210:\273\274\000\232\274\001b\275\274\3100\342:\n\367\013\275\341/\303<\300\033<<\210\356\001=\3509\250\274\266Jf<\365\354=\274\351\214\214<\007\314\240\273\'\306\237\274{\005e\273\003\234\316;+\234\334;\014\265\026\275\253\307X\275\033\265V\273B\216!\274H\253\t\275\300p\224;\273\252\255\274\236\0051\275\375f#\275\326\273\177\273\025\360\335\272}\2628\275\277C&\273-\246\204<>.I;@|l<\246\314\322\273p\033\035\275\'\210~;\002t]<\261I\021=\252\375\020\275\\\363>\274\250Z\253<\237\177\035\274\243\355\276<A0\222\274\033\"\253=zk\222\274\222\205\206\274L\214\334<\n\204/=OP\275\275\022\302J\273\326\326F\274\371b\200\274\005\033\202;W^\254;!\351\"\274\333D2\275\216\0333\274\236\227\021\274\270\213\343;x\001\335<\2652;\275(^\007=\177\004\246\274\277\234v:\320\203\001=\346\334\242:\017\236\344\274:\220\241\273\256\355\024\273\233\177)=M\275z;\375\244\302\272\245\361\362\273\364+\363\273\337\021\025=\1776\330\274\314\010\234;`\302\324<\345\347\006;\016\315A\274\202$\223\275\020\256\257\272\214\373\301;N\213D\275\304/-\274\352v*=\030:\225\274\373\023>=s+\001\275VJJ\273\340$\311\274\242:\017=\370\363\347;\343k\346\274b\355N\274n\373\0
16\275\231\323\033=\3220g=\255\273\224\275\252\240\240<\312E\006;\273<\002\272\3548k<\276|\375<U&\206\274\300Q\252\274~\264\001=,M\000<\010\2003\275\365\333\312\274\002\313\316\274\202\326\344<\223\027\024\275H[\003=\2359\266<f,\025=\005\360j<\365\370\253\273dS\240\274\35639\275\370i\361\274\316\357\216:e\360\316;\017\177\002=$[G\274L\324\250\274k\272\021=\n9X<LC\016\274u\361}\274\003\023\026<\260\022D\274\003>4\275\365\306\273\274\226\244\265\274\020^\272\272\254-\352:\201\016|\274\245\264\371\274/A\272<\270:{<\004\345\244\274a\332`\274\363\371\350<%\333\375\274\374@\271\274\204,\327\273\3308\002\274\364U\217<\204]\205<d\304J<\330\020\216\274e\251\270\272\2071\350\274K~Z=2\211x\275\230\2478;\321\255\356\273\317\223\004\275\214c\244<\351\224r;\322K\037\275Yn\t\273\010\356b\272\230\372H<\374\353\245;\031\326\027=\203e\221\273\032\224\007<`\365\005\275\314.\317;F\033\243<\006\335\305\274\033\010\351<5\037Q=_\000\363\273\332\377\303\275g\370\272<\332u\244;[\266\203\275\000#)\273\246\212\272;\305\353\005\275\267\377\232:\374\320\320<\356\337\341;\231\334_=Eb$\275\026BN<\365\270~;8\022`<^|\203\273U\335\267\274P\010\265;\253\030\031\275\275\270\020;x\302\241<\277\311\222\274\201E\234\274R=H<-j\261\2730B\017\275\227\234p\275p\\\001=\355\345\016\273\2615}<\351\247(;\177\221\361\273K\267k\275A\277b=\372E\033=\005\203\005: \001\252\274\231\221/\275\214\373\314<\363\034\024\274\225\340\034\275\006\366\303\274\364\356\003=\030z\021\275\371\211\006\275\231@\240\274\305r\201\274L0\253\274CD|\275\376$\016=\355\353\235\273\327\035\027\2747\261\316<\270\220\370<\357>\217<\307Q\214:\314X\010\275\300\373\0326H\221\336\274\"\244\'<\357\004\315<Y0><\250\236&=\010\202\224=pa\270\275\224\312\246\273\257\025$\275\014\326\340\274f\3072\275\346\273R\274\321)\360;\336\004M;\254\203\234<\373\234\277;\311`m\274\244X\2106)i\262<W\347\026;Qa\223\274\267\341\356<\305\003{:\\\317\014\275\200\223\211<\017u1\274d\362 =v!1\273\004!g\274\261\031\243<2\240e:\377\205\226\274#sF\274\323u\366<l\263|\274\264\337H;\035\002Q<&\304\321\273g6\034\275\261B\324;\t\370\231\273G\201\377\273\020\264\322\274\"#\333\273\365!\213<6x[\272\003\347V\272\201\013\267<9-@\275!`?\272+\263\367\274q\254\033\275\244,\264\273\014\274\177\274@4\000=\231\263I<J$\024\275\226\027_\274\346\273\303<\240q\210=X.\020\274\216\t\267<\t\013;=\344z\360\274\'\010\025=a\250P<\2324\363\274g\343\222\273\020\377p<Tb\010\275\367\263%\275Ag\340\274\224\243+<\246[\2159$\323m;\274\326<=\302O\214=\246_G\274&@+\275\2628_\274\361\277k<L\305Z<H\261\324\274\0160\324<\376tD\274i\212\217\2749qw;z\374\000\275\377*\020\274\243x\351\272\025\263\345\274\246\351\361:f\311\310\273\263Hy\274\272>\216\274\310B\321\273w6\035;\223\177\246;\330|j<3\237g\274o\367C\274\342\004\177;\234\330\364\273\322\333\230\274F\"\344;\215Fj\274\344^\261<\002?\n\273)\335\355\274m\361\241=x l\274\330\007\304;b\355\271;g\211\277\274F\350\013=~\274\004<\330\337G<\376c\241\274\274\271$\274,\342\303:[*\344\273g\367\335<\260\245\247\274ve\245\273\\\373K<\3269b<\036\367\225\274\221\342\207<\215\225b\275\003\212-=\270\030W\274{_O=J\210\306;G\244\375<R\376\230\272\306\014 
\274\345W\275<\275\240V=\333Z\204\274\331C\312\274\207i\014\274uF\273\274~P\255\274\231J\271\273]\272\273<\177\372\'<\344\343\246\274&\225\303\274}\371d<\245h\336<\213\346\324\274\364\255U\274\001_S\2741\2273\2754\321\205;0\306\0229r\357<\274\355\010\037\274\243P\207\274t\262\367\274\307\201\227<\0300\244\274\223\320u\274\232\024\016\275\023\200\'<\025\231I<\354\023\371\273<\177\205\2743X\334\274\323Y!:}\343\224\273\302^);\207\2648=\340X\'\274k`5\275\200\250\001<\360 \025\273\001\026-=\313^_\274\026R.\275e\212\273\273\024I\237<\202\365\355<\325!\242<yF4\275\336\243M;!UY<U\247\003\274,}\242;\270\365:<*\341\220\274}\252\306\273?\274\321\274l_J\274\203\316\n\275\337\315\362\273\017\337S;\340\"2\274N\250\031\274\210\275\030\2748\247a<\204\370\236<\027v\354\2736\264\376<\301-\261;\023\242\350<\312\233\242<)\317N;\342\0340<\357\226\271\274\003X\241<\257\241\357<g\272\276<\330\346\270\274B\003q\275\322v\230\273G\3600=j5\337<6\216>=\306\260,<5D\236;\203\225\217;\230\215O\2731@\245\274\222\225\201\274\017ha=J\226\t=\340\202o6P\224\231<\261\360\007<\220\202\231\275\356\277\013<f\206G<\372\241\316<2t>\274\340\226\007<Xa-\275\025\034\002\274m\301\257\274\032&\303;\311\261\023\275\004\363u\274@\323\236\273U\004P\273\312_U\274\373\361s\275VG\022==\005\017=\343\340+\275\271\201><\373\177\253;\260z\375<^h\215<\224\315\371<-R!9\025\362$<{\360V<\253i\205;\201\001\301\274\022\\\261<fYr\275\r\010M=\260Y\254\274\346kF<\232\340O=Hh\004=\343\007\330\2712K\032\274\232Qd\273R\r\223\273H\203\030\275[\242(=]d\322\273\212 \275<\'\371\322\274U\336\024\275\217i\205\273\t\272\335\274=\340\207=\232\024:=\205\032\212=MpS<\321\306\216<}Z\350\274\3772\331\274\333\376\275<)\274(\275]\333\037\274\206!\201\274\362\304\207=\340\025\203\274\013\202\315<\304\260\350\274n\350\330\274\245\232\270\273\317\227\344\274\376od<\344\"X=S\223\334<\025\312\342<\375\234\234\274\374\013B\2728K\023\273l1U<U\221\2369%)\260\2739H\233\274\224\302{<\271\343\376\273<\022\001<\206\220*\275\330\235\206\274\274\217\030=\271\"\246\274\252|W<\360\236\n=t\'N<\222\227\204=\247\024\006=\226^\204\274\337s\005\275\253\316\037=\270\200j\274\363\305a<\372\000=\272:F\026=*\227\355<bMI\274\005\366\302\274\0016?\275EMQ\274\326\244\034<2\005\273\274\276\214\027\274\201F\260\274\241\314\360\2744\2249\274\220\311\020=\342T\247\275p\300\326<\330\322\260\274#)$\2755\267|<!+\302<\365ji<\312\025\262<\340\377I\273\014M\224:\335\320\300<>\211\031;?\202\264\274\3209\017\274*\"\341\274\241\217\224<w\033\202<N~\263<\267\317\273\275\322\325(\274`\347B\275v?G<\036\334\333<\003\213\203\2725\334\236<\"\234\205=X\300\007\275{\306\017\273\236x\361;$+f\273\347\370\302<\\\021\235\274S\234O<d\376\236<\355[\246;\303e\217<\325*V\274\n\235\356\274\031\322\006=\376K\347;U\324\031<\001\274\315;\365\033u<\263\227f<lF\004\275\371R\360\274_\274\236;\220YE<\272\361\312\274\344\243w<\3515\206;\326\031\010<\233\374\017\275\337\243\347<\271\0169<\317\236\250\274>r\275<\033\245m:\005\327L\275\030zf\274\370\364\327<\260\021L\275J\337_<\245\002L\274K\010\214\274\037\236m\274\007\236\263\274\024\266\220\274\376\376\030\274~C\204<\271\204\330<:U\037\275\310\261\013<\255\216\002=\332\033\0079\tj\021;\371Ou;\261<%\274\305\031\230\274\316\321\250<\343\250J; 
\252\356<\017\302\216\274Qr\270;\274\221\214<y.\214;\212%\370\272\007!\266;\023\000\021=\351\025\355<\022\364W<mp\033\275\214\252\212==\006R\274@~\024=x\272\022\275\345\036.<g\230\376:![\302<\236\264\223=\005\273\030<l\347\':\021\216@<\021_\303\274\351.\313;\225\251\221\274a{\205\274m\372\233\273AP\234\274\270\204m<\263\333\247<\363(\353\273\021\027\272\273\231\340k\275\335\342\177\274N\021\322\273\003g@\274bw\277\2740\021\256;\255`\033\274\364\006\315;\002\332\261\274_\001\233<\346\025n\274\'\256\274\272Rq\245\272\367h\210;\224\343\207<\304\376\231\274Q\332@=/OQ\274\313\022`<\312\r]<\"\250\004\273\\\024\244\273\200\202\3519d\002\362\272Q\241\263\274\312\255\302\274g\337\345\272\372X\375<\020\342.=s5S<\307\007\'<?\373F\275/Z>\274\337\322\265<_\227\261\273\276(]\274\260\032G\275x\312\247;\234\376\025\275\271\256\227<N\326\202\274\335@\023\275\316\027\224\274\017\330\205\274\232\275p;\'\232r=\022\272\242\273!\255\340\274\310\213\222\273\313q\345\274x\350\224<o\'\217\274\374\030\232<\005\262\036<UU;<\335\323\023<\365\237\254\274\310\305\031\274\0247\313;\335\367\325\273\2225\316<\226\036\013\2741\323\023=\350\235?\275\360)\014\274\247\250\254<\033\306\036=\304Yg\275C\021@\275\34792\274]\3613\275<i\001\275\275\242;\274?\334\205\274a\325%<\t\323\341;\336\272\320\274\351l\307<P\\\006=-\263w<\004\252M=\267r\270\274\312\364\252\274\206\024\274\272\"\321\335\274\322~\360<3~\250<\261\016\027\273r\234\245\274*08\273Q\316\373<2\'\233<\214?\273;!\374\026<o\034\312<\224\214z\274c*\036\275U\374\215\273\034m\031\271z\351\373\274~\t\336\273Q\246 =\320<\211\274\257\245\r;M]g9\205)\223\274W\256\217<\024GW;5\033\220\273\252K\301\273\013/\021;\310\270~<\031\005\3759\366\330\246\273\222\255\276\272\n\247O\274\254\266\250;H\324\332<\220\311\031<j!\201\274r9\225\272\275\375)\274:5V<\361\362\254\273\257\226\254\274\255\252\244\274\315K\033=y\373H\274\372\303\251\274\362[3=\230\204s\273001\275\2377\271\274\356\270\r<\374\235,\275FS\022:.\247-<\203\233=\275\206)\224\274mXB\274\340\302\310<\267\332!\275x\351!=0\224-<7\234\227<?n\262;.m\030\275wb\315\272=\314\301<7\364\001\2742c9<\376|1<=\242\234;\212w\335<\314\313\003<\257>\007=]t\205\274\216\245\216\274\301\230m;\325#{\275bmh=\206+\223\273fM3=Mi.=\205+T\2755\214\207=R\276\344<7\203\346<x\326\355;\004h\036=\036N\200<k\211\213\274\340\271\324\273\034B_=\243\363<\274\230{\033\275\016\034E=8Z\245\2740\332\270\274%A\271\2740\016\240\274\3354\221<\023\r7\273\313\227W\2736\234\305\274\330\245\t\275\014\020\254:\232\005\373\272\367,\014<\237\366I\273Z\351==~\271\322\273\363\022\301\274\223\007\277\273\310\035\271<\002\3307\275!\220Y\274\022\t :\336\034\023\275(6c;Z\276\325:\347\277\035\275\0245=\275\035\276\233<k\245\r=\255\3355\275jC(\273_\230\020\275\242\n@=\236\032 =$?\001\272P\210\023:y\013\312<\224D\220<i\215\342\274\177l/\274\313\0166\275/\324\372;\263\355\331\274\241\343\375;\355\026\256:\230*:=e\366e<\221\032\345<R\337P\275\206\313i\275^\241A<JP\036\274,T 
\275_\277\253\274\312\274\211<\253@\024=\026?\335<\340\351\357<\252\340\221<\274\"\370\273\033A\362\274\312l,<\273~\031\275V9\247<\371\r\351<\335\253\027\274\006s3\273\035\2452\274\030\355L<\245\337W=\243\301h\274\371\223\300\273\342I\213<\300\026\347;\247\324\r\274<\323\321<lV\205\274g\277><\371\377D<st4=\374U\035\275\242C\005=A\014\221<?$\207\275\013\232?<\003\002\352<:\205\250<\346&\243<\361g\343;\212\'z\274*X\312\274\324=6:>9\314;\222\335\034=\374n\265\273C\205\262\274\na:=l\351\300<D=i<\2174.\274\354\232B\274\350#\331<\330\265\345;\303\037$\275\036\352|\274\244Wn\275V?\303;%\367\214<8L`\274\242=\300\274\t9{=\372\272\254<N\356\013\275\216\357)=\314^\304\273\212/\330\274\325\352\370\273\344\364\213<\221\331\r<\310)\374;\317\355\207\275\231\335\207\272\016\320\327\272]\234\200\274\023_\254<:]@=_\001\004\274C\226 =\262\225\211\274QL\275;\336\312\262\273\227\033\020\275\377\200\t\274\262I2<\016l\231\273\240%\307<N\254E<\3347\020\274L\364x<\222\026z< \2438<\231\326\274\274FO\005;\364\200\334;+k\315\274l\320\234</\264\260<\271s\275\274\"\214Z\274H\027\335<\003zT\274A\035z\274H\333!\275\306U\245<\020?\2019\343j\263<\244\2135<\327`\005\274M\345/<\207\220\n<[\251+=\347\367\215\274[L\21286\244\223\273\306E\300\273n+><M\247\312\274\037\035\210<a\371\374;9\241@=\246\336\243\274cf\312;n\241\034\274\225\026\221<\250f\014;\325\253\311\274\364\r\026\273n\006$<\302\t\363\272\030]6\275\010\311}\275\253\033\337<,\2247\273Y\272\203\274\342\371\210;\272\322\243\274\n&\266\274\326Y4\275\331\265\372\274\222f\366<\243>C\274\257)\207\274%\005\223<8Q\005\274mO\310\274\302\261\274\274\347\200#\275\r\252\343;\374=\271\271\315@\r\274\375I9\275\302\273b\273\232\324\001<D*\t\275\263N\340<\235\214(\274I\2502=\265<\357:\203\001.;Z`[=@\246\302<\271\212\233\275\017t\237\274\301\006|\274\260\241E\275l\023\247;\376St\274\032\372{\274VwY\274|\251\003<\261\345\036\274\022.$\275i]\351<\317\361\300;\035\266\014=\021G\025<\353`\267\274\272\332\331<\220\315\210\271\371\3670;\273\361\017;\324\251\213\2748\317\331<\326[\222<\014\314\354\273\235=\367\2743EY<\263\261\r=\252\346|<\252\323\203\274\017\237\037<m\274\016;\227\304\302\274\221\211\207\275x\t\204\274\353\0375=y\320\252\274\305n\256;r]n89\336\247\274\206fd=\273rL\274Q\312\211\274Vbr\274!\327x<7\206\341<\303\267b\274\245\334\331\274pY\364\274\331\2577=C\016\212=\377Q\277\2744[\206<Y1\004\275\000\246T<\341f\230<q\223\000;\001\2319<<\036,\274\355\225\n=\342\244\007\275f\201\363\274\024M\367\2741\346g:w\255H=5I\233\274\243h\034<\211\243\350<\232\034\202=\024\320\235;4)\344\274\375}\343\273\360\001\240\274<,\307\274\365\'C<F\003g\274\261a\364<\016<\315\274\366K?\274\3218\004=\322\277\240<^\014\306\273\304\r\223\274\002z\345<\232P\354\273\226\022+\275\313R\340\274~\266\301\273\364N&<\274\253\020=\211\214\203\274K\360\310\274*\233\231<b\351\217<\n\2250\275\375\341\241\274\303.\022=\221\336&\274\022\2326\275m;N\274`?\223\273\316\366\247<\220\261\005=\3064\341<\353\203\210\272T6\254\274\310.\241\274\321\0249=C\251Y\2758\022\024<\235\313\035<Y\225\315\2740\313\303;x\350\255\272\2319\343\274\350\322p<\236\220\347\273\240M\232:(\357\324;\020\235s=u!\223;\203<\266\2735\345\347\274\207@\007;Qm\365<\027\363\315\274\371\225\\:\2755\351<\364E\217\274\342<\265\275L\205\207<\363M\310<\0272\r\275\376\007\244;b\250\232;\277\005\032\275\207\002\027<\t\177i\271B\224\366<X\334\037=\'y\'\275\271\210\376<\214N\n=\213n\267;\377\227\242\274\250\313\213\274m\256K;\257\333 
\274\3146\326;Wu\230<\241W\275\274\362\350\211\274\2007P<3\240\366\273\360\004\344\274\332\274L\275\342\034.=\3752\232\273\322\037\311\274C\014\253\274\206\354\210\273C\237\\\275\222\235#=\353\211$={`\027=n\217\256\274\330}M\275\244\3667<\r\235\016<U=\313\274\216:\246\2748*\223=\370i\017\275\246\336\375\274\244\203\235\274\271D\035\274-\342\035\275\273\276\013\275\2562\372<\321\247\024\274\235O\272<J.\221\272\334\224\377\273 L\027=\371\352\023<@\3055\275\321s\001\275\000\264\207\274T\266\234<\261\234\366\270\233\220K\273\247\364f=\2206N=\264_\262\275\231\001o\2741\031/\274\203\314\215\275\277\200\370\274-\242\223\274i\314\331\274\244$\362;\004\261\245\274\2564\373\271\247\307\336\273 \346p<\357m\005=\007\272\272\273\256\231\377\274H\237\034\273K\337K\274\'ua\275\274\323\354<\027\201o\274\2061(=\020\232\344:\264~\t\273\344H[<\261$\304\273B\033\254\272\253~\370:\237\357\020=\334C,;\017\023_;\354\377\274<V\211\026<I\024\001\275\356\310\271\273Yi\241\272\207\214\030\275\021\341\007\275\362Q\204\274\273\004q<\335d\377;\260\211\002=\r\005\007\274\242\360\237\275}\302G\273)\305%=\253xp;\272\021\347\273\304\014\024\275O)\224<U\320\031=\230\305\005<\337\253\335;2K\344\273Z\343\221=\326ZT<\256\331:\270\tCe:\264\321R<\010\021\031=\234\"\271<\300\370\312\274\213l\271\273\312\313\374<\225\306\270\274\252\314L\274\303\0141\275\n\363f=O\006\257<\006\237\344<\262\003\334<\274_W=\267|\254\273\301I\025\275e\263\311\273U\024\245<K\351p<\013\207\320\274\347\030\003=6\256w;k\017\301\273\343\\\030\274\355\214\337\274\352i\236<\311M\035\274\024I\034\275\212%\235\274\367\273\210\273\274N\363\273\307\303\2509\235\300\313;i\212\212;\205\324o;\310\030.9\343\337\300\274\337\303c\274\337\213S\273\274\360)\274\336\023\004\275z<\216;\253\325\311\271\353\003#=\022\027\273:R\256^\274\233\033_=\357\016\220\274\302\032\033\274\251N#<\305#\222\274\033\240\026=\343f\213\274\367j\333<\337\'\337\2742\221\030\271B\346\360\273\r!\207\273yj\251<\226u\320\274\2049\020<\031\352\001=\362:}<T\230\301;\003\374\247<\320\227/\275\207^\032=\037\323\373\274|\270\221<\254q3=\325\036\241<\210\270\317<\235\0148<\202{z;\013\364\310;l\003K\274R\271\374\274\215\317\235;\244\210\200\274\213\022\231\274\227\027$;\255\033r<\320\\\246<\212\366\314\274B\316\n\273\347\353\344;\016\324\237<\254\225j\272t\241\325\273V\365D<\302\260\367\274\206\365\332\273\313\206o\273\375\256\017\273\t\263!\274\334r\313;\0351\220\274\362\336\233<m\001\213;m\351\257\2722\260N\275R{\333:\2433\275<\034\374V\274\006\202S\275\363\264\217\274j\310\027=\335\306\340\271$}\300<3\360\025=\247n\240;\030\226w\275ejV\273\tn+\275\227\2315=\334\363+<\023}\024\275\272\323.\274\345\260\263\273\315\177\317<\354z\244<\362\277\323\274R\276><\304\216\246\272\030\234\263;\300\000l\273\305\352\350;B*\2138\026\346\214\273\343 \024\274\211\253\276<]\252\326\274\326\277\215:\3608\254\272\345\3622;d\000\326;\000&\227\274\004\230\202<\311S\032=\"6\371\274\322\352\232<\350\037_\273\362\364\017=\342\317\021\275\301\351\252\273\264{/<\006)\315\274t\022\264<\001\330\265:X\374\327\273\\z5\275\330\3568\275\347\246\336\274\347\204x=F\230\016=\235\204\022=\240w\227<#\234O=\250\030z\274\246\021\230<aB\312\273S\257\033\275\037\375\020=5\313\234<\221\337\323\274sM\024=u\223\266<\220\226g\275lP+<\"p\001=\237&\357\273\206;s8.\177J\274\032<\013\275\317\375d<\300c\301\273/\243\205\273;\2479\275 
*\331\273x\037\332<\2172\220\274\034<\'\274p\002=\275T\027\357<CJ\230<\327\344\252\273\341\036\302<\353\363\323\274y\205\010=\014\365\021;\305B\330;\370|9<\310v\034\274\006\023\223<\005n2\273*;\"\275\276(s<\035\037\020\275X\031\020=\016f\211\274\375\352M:JD-=u\021^<\203X\314;\360\305\352\274\330\314\024\274\224\214C=\2054\016\275X\271\001\274\222\022\225\2759\321\215=\354$\177\274W~\030\275\200C{\275^\203\034\275\274\007O<\374\212\233\274a{\261=<H\010=0\324\336;+\307\354\274\205\313\013\274\325\240\314<\327\226\323\274\273\250\232<\366h\212;\305h\223=~\n\010=\377K\"=4J\256\274\360\335E\274$=}\274\271/\207\275\007q\350;\007\003\221=#\031u\274/(\304<\375[\252<\3725O\273\346\267\373<\0268\024= >\360<?\271\205<\251\252\022\274\226l\005=\305\215\037\275\247\223\036\274\375\377\033\275\037\002\371;h\325!=\366\222B\274\324\261T<*I\025<|\256\227<\240\343o=S\274Y<\233q\037\2759n\236\274\037\200\035\273X\030\020\275\335m\233\274\225\370)<\332\023\r<\"\3604<\351\177\231\274\005\225\t<x1l\275\035s\005<\343\007\'=\327\037>\275\346F\001\272\2535`\274O\324~\274\346OQ\275\203\333\023\274\222\234\024\275<\255\240<\010\031\330<PM\016\275.\336\271\274\343\323\217;\271V\036=2\2514<.\254\271\274\036\301\004<\222a\222<\306\001\262;`\265g<V\301\254<\300\266\231\274\253\333\014=\360\241\020\274r~\032\271;\314g\275\034\301V\274\376\207\313\274m\271\317<\023\265]<\360\033\036=\342\216Q=Q\222\320<r=\017\275\373c\177<%\020\221;d^8\274ggW<\022\274+\275\216\023\243;\026\367\021=\255\031a;\312\000\314\272g+a<\177,\221\274c\215$=\314Z\361;\'\367\367\273\223\264z\272\374\371\000=\270\243{<Z\031\010\274\323\251,\275<h\205\273\205\236\304<\002\313\250\274\031\313\270:\231l\025\275N\003\205;\362\007\026\275\242e\377<\326r\360<\216\344\365\274K\017\030=\360yb<\271*;\275MsT\273C\341\340<\261p+\275\r\373\236:\266}\000\275\2728\346;\225^\236\274V\030F<\354\032\333\274\200\300\'\275\352\245B\274\365W\016=@t\026\275vxR\274\366\271\301<\304U\227\274\222\227\2029\266m\3139\205\315\354;$x\222\274?\230S\273\020\273\020<\262\342\352\273Wo4<`c\216\274p\327\360<E\321e<\335\026\367\273\027\234U\273\2555%=\313x\206\274!v\217;\266!\213\275H\346\225=\341|\220\274\032wY=+\375\227\274\355\226\347<\224w\303\274E\333\036=\222\360\234=@V0<`\340\254\273\333\177~<\362I\016\275%|\263\271\025\367\313\274\2103\310\274\354\031\'\273\242\375\341\274\254\343\241<(\2630<Y\277\224\272\010m\023=\312[\344\274\345[Y\274k\366\364\274\302[&\274\2245\r\274\265+\200\274\315\006\013</\267\233;/\254Y\275\200j\007=\230\201\272\274P\337\"<\223\002]\273B\366\014=\231\376\245<\316\214\336\273\213\221\r=\205\205%;#\020=<\231\3349\274\006\021\307;;\240\r;U\007\2538\010\247\252\274\035\367\364\274\213\207\364\274\251\222\337:*\206\306<OX;=9\201\255<\3139\320\271M\336\t\275^r\230\273\377@\030=\351#\211\274\rb0\2744\261\177\275LB\214<\361O\362\274\323\000\004=\236\000\234\274\330\361\366\274" + } + } + } +} +node { + name: "conv2/weights/read" + op: "Identity" + input: "conv2/weights" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@conv2/weights" + } + } + } +} +node { + name: "conv2/Conv2D" + op: "Conv2D" + input: "norm1" + input: "conv2/weights/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "use_cudnn_on_gpu" + value { + b: true + } + } +} +node { + name: "conv2/biases" + op: "Const" 
+ attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 32 + } + } + tensor_content: "\177\324l9\030x\334\273\032*\3559\332C\016;\316q\032;,P\243\273V\341\2616\243\221r;\2220\200\273\204\337G:S\013\240:\177\276\2359A)\240\273\0227%\273\226\014\";t@\3359\304\032\323:\375\223,\273\3107\326:\365\325\034;?^\013:\224/K;\241E\n\2730\236\345\273\013\245~\272w\335\232:\256c\246;\241\374\341:\221\010\342\2724K\247:%\\\001<\303x(\273" + } + } + } +} +node { + name: "conv2/biases/read" + op: "Identity" + input: "conv2/biases" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@conv2/biases" + } + } + } +} +node { + name: "conv2/BiasAdd" + op: "BiasAdd" + input: "conv2/Conv2D" + input: "conv2/biases/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "conv2/conv2" + op: "Relu" + input: "conv2/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "pool2" + op: "AvgPool" + input: "conv2/conv2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } + attr { + key: "ksize" + value { + list { + i: 1 + i: 3 + i: 3 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "norm2" + op: "LRN" + input: "pool2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "alpha" + value { + f: 1.6666666852e-05 + } + } + attr { + key: "beta" + value { + f: 0.75 + } + } + attr { + key: "bias" + value { + f: 1.0 + } + } + attr { + key: "depth_radius" + value { + i: 1 + } + } +} +node { + name: "conv3/weights" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 5 + } + dim { + size: 5 + } + dim { + size: 32 + } + dim { + size: 64 + } + } + tensor_content: "\0137`\274\213\001\336\274\032\207\360<\246P \275\367\013\010=bB\237<\026\032\267\274\342\246\216\273\370Q\343\274\3435\202=}\036\002\275<\304\033\275\246\330\203\2751\244\231< \336&<\004\210\3649^\337\273\273\006R\010=\345\215\"\275\303;%\274\034\364\324;\361\262\345\274}P\261\273f>\032<\014\221!\275\016\270\371\274\206b^<Q\233\200\275f\316\325\274\315\322\206=\260\341a\2743l\276<\3265\010;\215<\247\275\030\t\345<\374cH<\232\267\200<\243\214\235<}\006\200<\315A\320;\205\365\211\274\212\256{:\223\2530\275tGa;\242\004\002\273=\004g=H\326\177=\217\220\357\274\227Zq<<\270\370<\242\034`<1\r\324<M$\273<\207\266\225\275\264\362\n;\246\343\001<\300\244\024\274\320\263\373<\251T\003<\357\212\202= x\233\274q\310\003>\342B&<wI\316<N\2774\274\026c\242\274\324\324\211<\032\256\013<\030\264\267\274#iy\273e\017\n\275Rg\211;\310\216\240\275s\313\217\275\230\213\007\275\316\036\037<%]\026\275\340Sy<\231\246\214<\356\014\325<\367F\232;6A\227=:\325\027\270+s\243<a\356.\275\303\355\200\274\274\254\001=\234\006@\272\207^\374\274O\325\320<flk<\007\251m<w\023?\275\341w\247\274I\220\265<\000d\262<\365\364k<\000\341@=$#H<e\356J\274\212\251\271<8`)\274\206\003\034\274\260R\310<\371\225\232\274\225\037\320;Uc:\274\202\332i=B\305\250;\335)\307<,\023\205\274_\363\261<\225\362\272<\237\352m=7\325\004\274\0048E\274\3126\217:T\271\244;\376\215D;\014\355\232\273 
\337\272\274~\374\231\274ac\324:\266O\214\274\325W~;\371\353\225=\364\373g\272\321n\375\274\376D\327\274\001v\300\274\252\373v<\372\304\223\274)x\353\274Y/\001=\343\355\243\275.\342J\274x\322C<L\rr:\300\231\332\274\240\242\266<\323\263\024=\202\310]\274j>\375<\305\334\'\272y\340\303\274\317\377c<\372\036\207<\323v\034\275w\212q<\r\227\236<O\201H\274E\313\032<\323ln<r\245\375\271\233\306\005\2753\312t=\020\tn<b\205\254;\305H\375\274\254m\360\273N*\352<\030`\227\273\373r\036\274\"\007\272<Y\206\007<\265\'(=\220\025H<\232\354\314;{\261\367<\302\275\031\273\226\225!\2744\332\307\272\247\366G\274\274\311[\274>\232\200;\002\243\377<\334~?\274nn\035=\233\3661\273HR\377<H\236F\273\235|\257<\327\276\274\274\034\371\246\274\225\202\006\275m\222\202\274\022\235\341\274w\241\224\274\006.a<\203sS=\244\370\270;\223\341\223\273\3735\315:\367>N;H\264\005:\222\236\227\274\237g\221\274\205\313K9MX\233=\234v\241;\317\254\017=;\270\"\275\340x\352\271B`W\274\313\276[\273&L\014=\233\323\"=\007/\203\274\004\231i=\341\214x\275\306\336\022=\321\031\017\274\207;\027<\034Q\243\274v\305\010<\240*\226\273Hb\214\274\360\351\n<\014\003\212\274\257\237]=3\372\304\274\266\264\356\274\354\177M<\244Q\213\275\332\273R<\321V\027=8*\006\274\226N~\275\203\271\232\274/t`<&\205\313<k\'\035\275y\320|=\253\201C<\312t\352<\353\027&\275_=\240\273l\002\201;\'\017c<\355h\240\273\356T\256<y\357\316;\253Tm;aJ\262<6\371\220\275\215NN\275J\302\013\275\346TR=\236N\270\274o\265\216<\273\002C\275\0330\354<\203u\223\273&\001)=:t\r\275\335,?=.F\277;c\021\034<?l\031\275C\302\021\274\254\266o:\321&\';\206+\371;\266\334\220\272O\246\313\274\022\254\342\274\313\r\360<\tXe\275V9N\275:\205\253<G\331\r=pt}<\363E\255\274\341\3604;\010b\252;H\363G\274\300\217\242\274M\r\"<^\333I\273e\372H\274&\220M=Om+\275\275&\334\275X\352\233\272H1\374\274\320\354\023<\307\333\242\274\273$\300\275&\260\272<)}D\274\230\266 \275\276\036\211\272\340\272J;\313\\p=i\350\223<\274\332\303<\204\245\000=s\'\026\275\2053e<\235\355\206\274\326\212*\275\265\364\r;\261\351\021=n\377\324<\233\004\n\274m\2302\274te\220<\241\202\202\274\252\303\326<\020lP\274\212\303\264\274\215\263\374\273?\024\271\274>\207\034\274\363\345\016<\207\234\267\274\000\306\261;\215\205\3579\265\321\002=\t\234\016=\353\300\303;\016\262\377;\026\013\305\273\010o\321<o\226\212\274\266\323\020\274,\211\034\274\262\375M<e\035\333;\007\314\252<\264s1:\207)\301\274\321{\\\275\0048\n<\003\016\244\274\336\003^<\017\207\021=\206\271\376<\356\361\003\275\216\255m=|\035\030<R\315C=\006\245\337<\001\254+=f\026X\273\245\014\253\273\273V\026\273p\307\017;\021\016=\275\\\276\261\274\235\223\034=\317P\224\274c\317\330;\'9\323;\004\277\374\273\016\302\314<\'\224\233<h\274\240\274BA\253<\211\274\013\275\010\346\021\275%\231\361\274\301\254 
\275\32153\275_\270[:\362tV=\000\266[=\340q\200;K\305\315<\315,\005=\001\334M\274\363\3064\274T\262\367<\231!3\274v\315\317\273\260\305\010<\301u\022\2749\374Q\273\034\314\232\274\336\022\207\274\262\316%\273-\263\002\275\273<\340\274\235\243\230;\032g:<\035\227h<\177\301`\271\366\0062\275\337\270\261\274\217,\275<\276G\257<D\364\301<\321\202\307<e\307\240<\341?!\275\323\372$=S}8\274S\343\177\275\324\211\343<\272\263\374\274\036\245-<\235*\200\274R\220a=\256R\035=Qj\330<\361\247\031=Y<{;\314s\243\273\002\033\350;\365\311\035\274\324\267\264<\353\323\377\273\205\344l\273\361HJ\275\2413\302<$7\345\273\010F\314<\342\313\306<\210Q\240\274\t\224\326<\030\354\217\274X\032\243<\264\t\204\274\314\362\264<\271\021\277\274\311v\200<A\021\220<\230}\356\274\350\317r\274\312\206\212<I8]\275\215\356y\274y\333\276;>\372\302<\327\304\215<e\353\266\274\233\377\023\275\354\232\253=o\001\321\274\253S\023=\223\237i\274\2703\013\274?t9\274Y\037\247\273\n\201o\272+\001\214\274M)\025\275i\237\023=\032\234!\274z\373\205\274\331v\206\274\2777W;\221eE\274\177\364\310<\273\r%\274K\334\020\275\261\374\r=\317\317\224<\327\220\252\274\n\000V;\346\036\365<\021\271y\273\245<\277;\316\273\337\272\256\334&\274\213\352\036\275\234\272!=\353+<:>\033\244<\034\306\364;\354@\261\274\322\023>=0\211\254<\321)\034;p\363\202\274&\264\020;]\001L\274\216\002\234<\302\\K\2744\r\251;T8\230<\233\377\260\267\206\350^\273_\032@<\215\377\013\275\222\353l=\254\026M;\014x\276\274Gy\247<\300=\211=\rTP<\277M\031\275\036}\341\274E\327\254\273\254B\000\275\'\273\233<\201a\300;\330p5\273\003n\226<\212=\353\273\222u\016\273<\345@=\262\n0\272\363\244\032<\307\224a:\367c\026\274#\017\375;se,<\032\327+\273\316\235A\274\017=q\273\332\031\331:\364S\030=\315\254F<Um|=\3022\215<\347U\222\274\002\275\324;\016AP=\221\'\356\274\336\361\022\2758b)<\370+\261\274 \273$=\"\227\353;\316n\267\2735\332\211\274\013\314P=\273Z\305\274{q\032\275\360b\340\274\235\005a\275\317\366\213\274\324p\233\274\363\254]= 
[octal-escaped binary blob omitted — this span appears to be serialized numeric tensor data (likely float32 weights) from a binary file added in this diff and carries no human-readable content]
,\274\330\270\3419\270\302Z<;<\374\272\"\002\241\274\000\252\352<\225\360\214\273D\331W\274\300>\216\274\021\254`\274\363\350:=\345\366\376<\274g\004\274\201\006\261<\232\215\266;\326\354\267<4#\036=\211\037\354\2740\354I\273n*\312\274B7\'<\235\212\017<\247E\035<\216\201\211=\234\241\003\274\370\004\014=\235\r\202\273d6y\274N\024\247\274\275\215;\275\3659\226<\355\220\242;\342\320\222\274\314WA<)_\"<hoH=\234\336!\273\035\034\303\274\255\306E;C\261\307<\007\254\343\274\2724\005\274\320F\037\273\347\2332\274\035\007\033\275\231y\017:,z\212\275Rv\002=!+o\275G\340\201\275ZY\214\274Q\263.\274R\241\253<\004P\031\273\007?\220\275S\344\347\272\233\370/;j\0026\275f\307\207\274-o\001<\304O\034=\335\335&<\021\002*\274\275\333\207\273*\230>\272\214u\272:\241\2013=\372\326\257\2744\337\336\274\004\2743\275{K\260<\005\027\341\272t\343\263<\375\205\355\274\324li<\236;W\272>\335\201=\216\341\261\273\234\216\022\274\216C\245<ciD<\\\214\345\273\313\336I=\200\226\233<\006x\020\274\300\001z=\221R\001\274s\253\222\272\006\"e;\0030\322\274\020\024`\275O\227\327<\014t\0249\2665\351</P\345<C\006\362<.\305\321<\327Hi\274\357+a=Oy^\274\247g3=\251\016[;\241M\302\274w\377\325<;\034\025=\234^3\273`0\t\273\224\n\242\274\210\366\272\2743\241\3009\353\372\201=\322\266\271\274\255\242\331\270\375\335\026<\r\266\201\275)\004,\275S\370\235\274C\3528\274on.;\302b\326;\332\n~=\013\325\232\275\213\322X\275d\004\225=1\251\376<M\270\242\275f\376\370\274\277\351\271<\210\\\251\274sH\353<\206\024\016=\267\302,\274<\345\272<\020rD<\3213V\275~.3<L\206\213<\037b\033\275aa\340<>\324\265\274k\337\206\274\031\"\246<\204\345\213<\023\3076\275\327\376\311\273+[v\275A#d<b\235\301\273\230\003\032<Pb\007=\365\322\273;\344\"n8\313\004:=rb\332<\270_\\\275\342\243\035\274\263c\324<z\202\250\274VK\037\273\244\202m=c\226\342\274K\'#\275\030\335\223;>;;<\242\030d;K=/<\210\3037\275\245\213\000\275\233\270\263:<\034\326\274z\035\254<)\305\005\275K\033:\275\275\217\026;T\3456>\016T2\274)\204o<\3631\235;\346\310\245<\002\273\213<0/\212\274\272R\230\274YP\006\275\376J\017=\034\357]\274\357\225:<\216_\033;\261\007\025=\177\324\025:\215u\260\274\254\263\330<jYJ\273\343\317\315;?u-<n(*;\331zD\2748&\363<\360\376\213<\346T\036\275\213\244\031\274\\T^9\3462\212\273vf\225\273\343\030\240=\273\271u<5\255\246\274\334#\"8\217\347\237\274V\350\202\274?Cf\275A\263\264\275\224~\303;\343M\364\274$\373/\274\276\244\255;\372o\227<\303\256\273;\177\362J\275:\244\030\275\302jM\273F\324\242\274\277_>;\236R\301\274\257\360\000=\023\364\235\274\241\264g<\205v\216\274\273J\000\273\021\205\210\2745->\275K\236\276<\000&\204\274\316\205N\275\316\250\331\272\207\264\261\274~\3104;DA\235\274\254\376\265\273\212\352y\274\207\275E\274g\372\217:\030\036\222\274~\237\336<QM\341\273\362\016\323\274\225\375f\2749Q\302\272\355\327\016\273\321\007M=\306M\331;\223\343\211;2\201V\272%\021a\275\241\234z\275@\361\025\274\231\365\353\274\272\255\206\273q\342\332\274\207\037K=\252\346p;\331\010\243\2745-f<\016\274\036\274{\226\242\274\021L\017=}\330,\275\317\367\341<\222\266Q<\346b\304\274\017t\014=U\373?<Y\342\316\274\202U\'\274\250\010Y\275s\347\3079\253\310\266\2757\325\373\274\264U\333\274\003\354\234\274\362\377\321<\010\375\300\274\034\t\212\275Cs\274\274\327\325\017\274hpf\274?\025\262\274\3438\214=\000.\236\274L\355p9\255\274,=|\223\020\273N\212\006<-\201\227\274Q;W\271\374[\241\274\262\215\010\274\332\203\212=\241Q\177\274Q\224\235;Q\263\272\274\2227\232<\271\257{;,\353\330\2744\227\027\274[_\216<\204[\254\274v\240\360<\303\204\312;H\220a<,\177q\274\257\220\203\273P\221\205\273\255]\2
14<\2543\337\273\203h~\274\321\037\013\274z\202\225\274du\250\275\217[)<A\021>\274\243\3263\275N14\273s\031\253;\026\020\267<.\235]\274\256.\255<\007X\264\273Qn\'\274I\231\\\274Q\254\251\274\211}\301\273\334\002\350<VN\013\275\204O\247<\204\273\277\274P\025\217\273\226\004\031=\345\220 <&[D\275\035\032\256<\205c\353;\360\274\267<g\034>\275\222V\003\274\220\032\014\275\276%(\275\014/!\274\372\271\203<\237\244D:\346\017W\274\241\355]\274\315\335:\274=\267\322\274\024\267*\274UO\203\273|uM;[\017\220<\334\217b\275}\227-;MD\350<\250JI=\"!\202\274\241\236\275;\272\320\266\274u\000w\274\025\372\007<S\340\013\275\035\274\014\273\313\361\005<7\243\252<\253\323)\274\250\033T\274\317s`<lpW\275\203\263?;\200*G=}5@<\3679Y<\177\260\330\274\345\331\272<C\nU\274s\034\235=\0275v\272\013P\210<K\241\360<7\211s<\027\331D\273\007\326\025=\240\215\351\274\346S\242\274\020\345\307\274P\371\236\274\310\177\224\274\204B\226;\277x3\274\277\027\302;\364Y\022={\370\323:\0017\010\275h.y=c\214\206<\266\345\321<\345\204\342;Z\231\017<\302\024\352<k\375)<\375\026\024=9\033-<i\377\310\274\373\350\303\274\342K\263\274\217!\217:p\0140\274t\304\375<\224\203\313\275\370\377\204<s\226\033\275\360\323@\274\241\362\306<\206\275\251\273n\016k<3\013\202<\237\327{\275\357\333\274\274\355 3=^\350\256\273\311\311\023<2\331\267\274\346\253\344<)\355\201\273D\323O\274\371!\305\274\375\224\n\275\tK\322;(\254\237\274+\241\023=XM!\273\232 :\273I\225\353\274\004\033\034=^\013\223<\337\367\246<\333\310\216\274\3132]\274\333cN<\3463\314;\310\315y<\002/\\\273\035\303d\273P\276\377<x\261T<b\361\232<\327`A=\031$h\273E\366\322<\3447!<\"\367\017<AJ\016<\304\330h\274n\220\233\274\203\202\233\274.\235\276<M\234\023\275\3143\356<(\000\n;\207K\220;\025\207J=\253n\342:G\375E\275\033J\332\273\334\240\244<z\363l;\022\302\364<\222\265\332<\371.\237<\260G6\275\245\001\004\275\001\3170<\026-:\272\231>\342\274j\224N;\302\255\357\274\360\205\322<\007\233\031<\266\244\213<\236\261G\273\201\222\\\274[D::B\275\364\273\325\024\257<\025;\334\274\350%r<H*\022=\032\252X\274\313\305\270<V?\226;1r:<\301\307\257\274\351\372\307\273\016\027\265\274\363zK<\274+\324<\343\333C=\005\345\340<\267\3518\275Lh\251;U\263o<o9\201<I\3418\274s\306\262<\343\364$\275\247\226==\302\024n\2734(\030=^_\026\273\035:\226\273\036\356\326\273\035\371\316\274C\230(=\021%z\274\312\002\343\273X\373%\272\177\003\310<`\365x=\233\023\323\274\214\264\032=$\003><\212\310\360\272e\036R:\374\330x=\353\270\246\274_}\007=\256\024Z<\374\216d=~ 
\002:\233\370\274\273l\254A;&\337\310<\354\341\362;\303V\260\274\353\021\332<q\t\340;z;\305\273\306a\037\275\005\213\254;\377!\031\275\301\314%\2749\213\005\274\343\'\217\274\216d\212=\326\0137=p\321><\300\267/=m\035N\274\347\024\225\271$\253\343<\220\201]\274\204\241_8\266\266$\275f2B\274\316\033\224\274Qf\037<j\035\304<P\304P\271\031\261\350\270\023\027\'\274_H&\275\021\205%=L\363w\273\356\031\317\274\236\205\263\274Dk\314\274\277\220\255\274\333\307\255\275\323\360&\275\216\177\263\274\215\"\330\274\217\343\244<G\224]\2740n3\275s\277\004\275\316\231\247<0\275\013=\253\364\230\274\230\"\353\274\310>#:\301`@<\210A\016;<1C\273[|\006=\256\035=\273Ch\266<lZ\326<\352\247@=\t\207q\274\333\227\336;\254?-<\307#\236\274\003\244\002=U0\241\274\350\265\373:l\333V\275\024G\305;)\304-<\206\266\246<\223nK<\257\3226=\372\252\"<\266\265k<j\313\204<\337\356\203\273*\013@\273\333Y\206=\026\0349\275@P\373\273\n\247\032<T\206\370\274&\274\203\275\034\243\023\275\276\364\227\274L\320\024<\004\026\200;\242U\005<\252\227\223\271\322j\362\273%\254\262<\014\266T\274\333\013P\274M\330\005\275\246\022[=q\323\303<\354l\211<\262k\177=\315\334\017\275\007\303U<O\274\021=M\213S<\303O\\<0\267b\275\222E\221:\302\236\026<\022\224\361\274\335aE=#A\344\274\252w\002<\252\010\035<\007=\350<\t,\246;\r4\243\273>\300\344\274\352u\243\274\2033\243\2733\022#<\310\2318\274\267\310\267\275<\315\2209\004\204i<\214\036\374\274\246\002\214<\243D\001=P\317\312\274+\273m\274Z5\230<\254\241@=h\202Q\2738\211w\274\261c\311\272\230&\375\274\324X\005<h\022Z;\337\022$<#\326e\271Xs\236;m\363r\274-\034^<[X\321;\204\370\237\273\332\204\354\274\336\2134\274k\333\021=\315{y=8\025\n\275\020\225\2368I\302\353\274\323\245\350\274%<\276\273D\032f=\341\002\312\273\366\022n=\231\022#=g\211=\274\236\212\256\274\3322I<\371\000\361;\004\250\270\274\001\334\374;o\266\346<C\025\354\274N\002G<\323\272y<Y\343\206=7V\254\274\371\207\213\273\266\224e\2748\2076\273\312r\365\274\317O\244;\265j|\274\247\263r\274\021=k\273b\014\231\274)7Y\274E\251\003=\372\222\037\274\210\004\n;\324\201T<\224\343\000=:\326\002\275[+\277\274\253\325\222<\256\006\235\271\202!-\275\037\214\330=!\334\320\273L\306\022:\261\264\255\272\025\"\260\273\271x\213\274>-\207=d8@\274\202\220\234<\357\257\377\273v\327\352<\001\265\367\274\241\270\262\273\215\361\306<Up\346\274\346\026\205\275X\271\313\274\221L\201\273{\321\320\274\204\345^\273F\222z<\232\325\016=\243\315\331\273\362\177;:(\224\254\273\001sG\275\326\275g\274D\233\227\273\206\265\301\275\231\336\320<\211\231\245\273\302\226A<V\016:\274\300\202\264\274\002y\023<\340\233\320<{JI\275[\241\336\273\335C\301\274\226\020\236<\035\3746\275\263\210Q9X\221\271<\336\036\252<\340?J\274\330\2351\274z\221\250\274\225-\007\274$c\345<\242$\217<\343\006i\275c\236\302=K\201p\274\230\215`;0\367\361\272\303\035X\271\352\270%:$\272\211\274c1\352;]>\216\274\327\256_=\340b[\275J\233v\273\326\\\267\274\317Q\037\275\241\275v;_\303\001=\n\304V\273Z\\\253\275S\366\353;\036\002w;\240\321b\273)\225\223<@W\005\275\334\276\224\274\267_Q=U\244?\274!\311\313<\204K\265<\205a\007\2750\213I\274^^\245;q6\205<OJ\355<J\026\t\275<\263\307\273\236\226|\275\256\033\350\272+\203\252\274RF\352<n\220\216<Gb\215\272\346\372{\275)\204\330\274\326\322\027\274\316(\031=\000\rR=\366\024\330\273\212b\370\274\354\016\032\275\177\355\203<\032\026\262<a\366f<\275\205\223\273\313qA\274\021\235*<\026n\357\274\355I\t=\306:\252<T\245\233<{\030\010\274\333I\236<\001\035\265;\240\222\255\274\337\353\001=\350R>\275\315\334\000=\326>\033\2759\030 
=y\025\004=\243\330\034\274,7\212\275\371+U\274\014\'\205<ie4\274\013\n\006=8\r\014=\337\256\225\273`\205\035=\303\244\274\273\374P(\275\032\337\370\273`\241\226\274\2712 =\360\360\034=9\377<<\315v6;O?\034<<\031\341<\345%\"\274*\t\305<4\320a\275\220\361\273\274ED-\274\226s\n=]\276\263<\206\224\032<_m\210\2742]\207=\205[\243\274\367\203T<\2625\005\272r\244\220:\021\356\375<]\221E<\263\201\003\274>\031\367<\260e\021\274\234\357\310;9\224o<c\312\024\275.}\244\274\360\241\025\275*mA<\337^N=2\344\256\273\033\372\005=\272\205]\275s\342\027;;\325\031<\254\301~\275\375}\001=\366(\340\273#9\034\273\256\376\034\274&\301\010=\207\310:=\262\237\000\274\213\004\030\275\3049=\275\262\261$\275(c\320\274Gf\264=\021\273\000\275\365\354\033=x*\177=Z\375\230<\263\373\270<\031~\376\274\225\270\033\275\037d\270\274\373\325\t=JH\236\275\000\262\345\274\215\321H\2738#\223=\r\325\022<\0136\265<\320\330U\275\372D\232\275\336\226\026\274x\212\312<\260\312!<\343m\007=i\312\363\272_\252o\274\207\2731\275\304\355\343<\237* \275\332E\220<\326\2203\274\004?@\273\303O\027;\346\216\222\274^\254?=\365\271N\2746)o\274\006\\\235\274\340\257\325<\226\264y\273&\3206=\232\023\323\274\323\020\326<\341\337\371<T\312\350<~\364K\274\333\306\230<\3632\257;\306b\360<d\267\255\273\371\3771=g5\254<\273\323[;\333\306T=\261\303B<\320H\225\272\376;\372\274\252\2551\274l\315\300<w\032\352;)\027J<\250\203\014\274\215\016\325\274\375 \303\275\346\026\312<\307\352H\2727\335u\274.\201$=\320B\023\275\0318\034=$\tT\274z\224i<\017\324\002=m\351\333<\334_k\274Z\025]\275\210\357o\272+\346\224\274i\240\277<d\207\366\273\031\344\256<\263\002\274\274\2113\371;\336b\223<N\271\n\275\322\361h;mB\357<\213x\014\271\010\007e\274W\213\324\274r{\004=\262\245\202;\267\323\252<l\202\274\274vw\312\274\210\267\372;i\347\325=\247\212\224<1\376U\274\317\003q\273\220:\3029x\202\271\274\304\334\014=\357\322\343\273\232i\"=\027<\232\274\301<\n=\262.\254<=TS\275H\3433\275\363\302\224\274C\2339<\t{O<\tG;;\326/+\275\223\002\3149\247\354\360\272\365\360\275\274\221~\243:\337\251\026\273\325_X<9\032z\273\360\303\244<0\275Z<\034g}\275\352,\230\273\315L\245\274,H\201\275J\227\275\274:\350\226;\357\230\"=\226U\006\275\245\r\260:\222\243\247\2758>\311<\003\304\251\272\200\340f\274\226E,\275\232j,=\334\232$\275\0131O\274\343\213\235\273\204\to<\177\301\035;\003\'\002;\272*\031;\333~i\275\0301%\275\362\302q\2743\332\374<\343\177D<\177p\224\274\014\016\305\272y\"\262\273\023:\325<\237\326\r\275\'u\002\273|\366\235\272\232\334G<\354\023&=\244\314%\275\302\023\367<\221\235\265\273\220\307\225\274X\240\250\273#\036\256\274}\215\223;ag\226;\320d1<}B\207\274\222\232\251\271\003R>:\324QH=%s\304<\217\014\221<Y(4:\222\016\265\274-\341.\275\220J#\274fv*\275b6\240<\277}0<\337\343\003\274\226\343|<\242z\023\274\005\2737<\031\366F\274\334W\242\274\316\370_\273\371\330\370<\021\205\\<~8_=G\351\350\274\262\013\301;\177\215\254<p8\220\272?\267s<\376\242+\274\314\331\343\273v\224\255\273\253\322\247=\225\024\321\273\206\r\005\275z\n\307\274\254\273\217\274F,\002\274\215\365\250\274\"\3315=\263\214\001=0S\035<|\262\001\274\021w\313\274\335M\314\274zv\001=\350\t\320<N\321\231<\016\276\374\274_\310,\272\252\307\177<\256\001\231=\331\2421\275U\357f<\307=\214<\355E\252\273\230\017c\275\001\300\360<_\326\315<\177Q 
;\267\256\305<o\205D=\252\322#\274S<E\274\275\030k<\215x\343<\304\263\366<\222\201.=\206\327\257<\235&\005\273W\205\225;\300\233Q\271p\211\314\2746\003\005=\035]\311<^\201\013\273\206\352*<\240ZL\273\355\262m;\005\322\242<;\247+<)u\032\27553\356\274\262\334\316:\330\2346=\226Y\350<\364\230O\274\350qs<\026\201<;\022v\203<4\242t\274\023\323\220\274\374\315\252<y\204><`\307\221<\245\034C\275z\304#<\335\026\233;%\r\230<\322,\027\274sSF\274\320\210\221:\020\255\256\273\024}l<\0141S\275\232I\222;\301Cd<\222\252\003\275\202\357\240\274v+\006\275\340\254\031\275\3246\005=\347s\252;\"X\3309\352!\273\274\rL>\275\257\314\253;h\307\355<\310\206\r\272\307\301N\274\254~]<V\025u\274\365\006\n\274mhS\274\304(\327\272\246\334\004\274\277\311O=\"\t_\273\343\301M\275\371n\244<&\276\016=\227\023a=\243(\301<\037\037%<\001\310\374\274;P\017=\232\004\271<Dbj\274g\003\335\274J\372\263\274\037J\240\274\037\227\230\274H\357\343\274\307\205X\2750\246\217<\363\2623\274\335*\025=\372>\260<\246?\377\274B\261N;\374\261\246</5\203\275\334\317\024\275\262b\000<D3\225;/b\237<\372\243\217;\037\020\362\274\3475|\273\251%\251<\006\227\315:\337\025\263<\231u\233\273\317\032\222\274\356\233.\275{\014\361\274r\t\354;>p\202;+;\353\274m\207B\275\"c\341<ibJ=\311\247\252\273\005\360\347\274n\223S\274Q\307\244\274D\031Z\275w\243p;\346\2358<\032X\306\273\207o\274\274\373&\224\273\301:\225;0\017\316:e\234\027\275@\206\030\275t\3271\275\327(\271\2738!\257\273M,\205\273SN!\2740\217\224;\232]\007\2753\262*<\367\374\";\026\253\331\272\233b\312\274\300\373D\275\000\177\003\273\204\264s;*8\221\275J\334$\274j\330\025=,m;;I9w<\241\236\261\275\027z\200<\221\371\206<\245\211\205\274\306\340\024\275%\207\342<oug\275\0009\255\274\307\035\370;\252\202\013<w\351\351\274J\301x\275\347\325\020=\032\217i<\323\306M<i<\032\273\021\037J\273\232\355\301<\234\301\034\274\033;\316<\375Y\005\275\344\301\346<\263\2541\274_d|\275\322\360\001\275\006\023\366<\'\020\351\273d\306\006=#\335\210<\003\316\332\274b~\336<\262\276.\275\211\205G=\315\214x\274wYX<uj\033\275B[\231\273g\230\315\274\002NZ\275\345\334\221\275\312_\221:H\245c\273\204\033\010=\366\207\313\274\224\217\370\274&\333\035=\350\257\207:\304\250,\274\023{S\273\265]\300;\001\003\215;\335\177\224=\025lG\275\341\367\277\273\2713C\274\226%m<\027M>\275\334$\367\274\201\327\225<\004\321>\274\365\2224\273@\364\227<$q\227\275j\301.=A\335\200<\372\315\356;\273\336q\275}!\312<-1d\274[Tr\274\242\021z\274\346\3112\275!\2746\275\225w\301\272N\265f\274\365,\314<\300\257\205\274@o\000\275\364F\351:\223\325\377;\247f\340\274\\\347-=\331c0=\247\023\203\275W\234b\274.)\363\272\222\326\002\275Z\333\005\274\032\245\354\272\377)/\274\342%n<\340\034\365;9\202\310<\335w\212\273\037\262Z<\371e\006\273q\260\016<\2775g\273\n*\244\274`b\226\273\225\366\027\273\205G\210<\267d:=\203\312\002\275l\177\264<h\026\361\274\215\344\337\2745\t\023=\010\230*=\211N6\275\363\200]\274\332\213\225\274\r#\260=a\377\000\274\226<]9je\356<4G\377\272>\212\356<p\357\265:\n\215):?\201\201\274\251j\205\274R\013\317\274\330y\210\274y\346\333<\264>\217\275\317\005\004=\275\316,;\003\370\276\274\215<N\2755\347\327\273@v\003=\256s\005=\3324\006\275\320\316\325<\305\327\367\274\3519\027\275\262\203\005;\3014d=|\216\035\275*\260\303\273\321\037-<\335\313\211=\376\033\256\273 
\270+\275h\303\267<\275#j=G;\355\273\177<\013=\316\377\013<\246\007\267<\202\274\336<C\017,\274\214h-<\256\372X\274\242\026\327<y\316\224\274\234V\010<\225\243\225:\265Y\203\274x$!=\203}\207<~\243\200<\204\264\017=\350\'h\274={\237;\364\252z\275\242##<=\241F;\2139\312<\345kP\273G\235L\275\301\221\264\274A+\002\275\271k\323\2748(\3515j\225?\275\365&\356\274NW\355;\ncq<L:G<=\357\020<\'\262M\274%\304\256<\0011\200:\207QC<\035\232\020\275\210\317z=\034\274=\274P&\242<\236\310\273<\376\000\007<r\302\342<ClM\274\324\310\275<\263_\336<\356J\207<\304<\244<\243[\307<\321\212\371\2748\333\323<Q]\323<\032VZ=\375\250s\275\21298\274V\223\313;3\005\302\275\342,\n\274\217/\350<\366<9=/re;\351\276/\274\274h-\275\016\325\026<C\316F\274\013\353\002\275A\234\001\274\215\262\361:\214\252;\274\251\312\033=p\177X\274\\e\005\275\363\\\310;f\016z\272\372b\032=m\373\203\275\034W?\274\211M\215\275\024\2237\275\010\006\307;\344r\317;\252K^8\275\224\350\272d\022&<\345Ce\274\rcY=\313bo\274\000\311\204<>V\327\274F\256\242\274\247\374=\272G\013\265\271\370\317\017\274I\275\227<\261\244\226\274Q\205\004<\367\362\336\274\246\375\247\274\252\354\330:I\361o<\376 \017\274\354\347\233\273Fv\206\274\217]\231;\316\351\323=\366p6\274\026\311\010=\276\223\263\274,\343\004<\004NG;G\343\023=\357;\330\274X\037_\275\"\332|<3$\372<vV\240<\244\005\356;LT\'=\345B\226\274/,\257\274\006\004C\274\331X-<d!\027\275YI(\274\250p\230\275\200\032\224\274A\210\256<i\244\003\274\364\313\253\274)\362!:r\266\375<aLB\275\215R\247;()\333\274\\\002\237<\327K\006\273\225\372\307\272:\251\276\275\026j\304\274\003\354\231\272u\001i=`\215O<1|v<H,\353\2745\256x;J^\035:\001\024K<\322\370\204<+\255[\275`QI<\271\030>;ql:=:\327X\274Vg\322\274\270\260\317<_\266 \275\225\271\2459\357\270J\274\366\221\2257\270\306\201\274\034?\253\273>jA=\275\000\260\274G\004\377\274\001vO\275\242\262\351;t\277~\275\310\353]\275\324\367p=WB~\274?\354\301<\222\322\245\275F\347\211=\305\031\220\274r\221\245<U\347\034\275\006\004)<w).<\372\277\022\275\370U\026\275\352y\006=\233\302j=N\223\t<bE\014\275\177\341\311\2742W\252:\213\247\262<\373\367;\274\311\257\000\275O\006r\274d\204\363\274\020F\3369\344p\236\274\206\016\221\273\334\244\371\272_:x<T\276\261;\235\363d\272\313\270\342<\232\210\252\273\225\2573=\245\304\010\275\025\005\224\273\214\345\334\273\340\325\245\274\377m\207\273\323\333\213\274\230\330\231<y\341\027=JY\233=\362_b\274\236\224)\275!\355p\274\200\313\252=2\005\375;\2363u<\037\353\234\274\021`\211<!\266\321\273\243\014\006\275\222x\177\273\2078\310\274\274\366\345\274/\254(\274L\201u;\243\331\210<\000:\305\274@\372{\274uQ\343;\036ON\275\177-9<\236\213S\275I\223N=\213\010\352<N\357\211=\2026\317\274\257\236\031\275\313Xf<\026\236v\275\374]\211\273\004F\344\274\377j+\274\262\331\266<H\002E<\007\031;=\246\331\366\274\236\3000<\205\000\216=;>\324\273n_/\274V\254\274\274\252\306\234\274\363\317y<\362\265\202<\037\331$<VS\256<\247\222\327<\003^/\275>^\235=\026\030\247\274*\302\251<i(\177\274\007\020\330\273\301\356\212<\243\031\250\274\215\205u<\331\257\024=\251\2633\274%(\316<C\004\025\275\275\246\356\274aK\026\275&X\245\274b\340\227=}\003\001\275\325-\031<`\t==\217pZ\275XNF=\370\214\311\274O\302\203;\302\301\210\274\364\342\227\273\357E\233=}\\\345\273\333eU<\1772?<\007\204\347;\027\304\275\272\031/`\273hH_\275I;$\275<\366\320;\023\213\036\275(<\3019\326\322\224\274a\313\331<\314j\231\274\004@\262\275\302|\260\274\016\211I\275}\241\025<[O\034\274fr>\274\334\245\377<\026~\212\274\314i\032\275A:\001\275}A{<)`\374\273\375F\002;\204\360\316\274\244\212\252;X\214y\274\225\223\2
06\275o\343\361\274\222\261\212<o\240]\275\332\225O\275.\320\311\274\320\302~=JvK\275\253B\313\274?\215!=\204\003\000\275)\215\221=\357\3471<\315\363\267<p\254\210=\022\302\252\274q\301\224=\276\316\325\274\212q\240<\335\312$\274\3766\347;\266\202\033=\207\177\020=\276g\223\274\004\215\020\274\032%\252=\003~\210\275\270{\222\275\223\270\274\274\246)\366\274,nS<\242\303\340<\034{]\2749\203\231\273=0\220:\225)\202=~\305\356\274\326\302/=\224\347\334\273\325\250E=\322\322\212\275#\027=\273aF\317<\334\035\025=\320\377X<p\3471;\370/\010\275\350o\227<\256\034+\275g/\341<\240WM=\216\242A=\005\205\206=\257\243\034\274\212\260\207\274\006\311\"<B\025\267\2747_\234\274i|\337\274\'\354\210\272 \336\247;\342\313\363\274\342\334\233<\252\003\301\274\223\252\317\274\265U\320<\221Cx<_J\233<B\007\217\274\0338m\274\237\344S<\305\253\351\273\353X\370\274\314\006Z\274\323\247\250<\251:z=)7$=8\201\321<@<G=a3y<%\321h<p>\206<\231\007&=\016\270z<\367<u\275\257\231g<\274\215(\275c/\266;\230\227\022=\361\302\330\273:\354e<b\221\311;/\264\371\273W\326\332\274/\350\005\272\362\030);W\217!=O\004\223<8\344<\274p\230\355\274\216\247\204;WU\363\274\334h\371\272\332>\235=\t\342\017\272\236\324><,\231\025\275\321\2208<eg\202<\245E(\275\347_R\275\273^/\274J\371\233\273r\005\311\274\257y\265\274\226\177M\273#\346B:\230\327\016\274\260\275c<\233\332\337;\204w\030\275\365#Z\274\326>k\275\0038\315<0+\001\274nD\036:\354\\Y:\236\313y\274\372+\252;\031\0255\274e\241\263<\216\207B<Rg?=\272\323-\274\2738K\275\330\236\241<L\016w\274p90\274F\237\272;\245\202\001\271\264L\032\275\033\026\361<_\233\261:C\216\300\274\346\355b\274zu\311\273\351\032y=\225\242\213:\310\3334;>\352\233\273l\332=\274Y6==\014+\014<\252.!\273E\362\247:\336\361\204=FS\201\274\263}\321<\333\030\3219mq\002\274k\361(<iEZ\274o\013\210\273Sf\365:\007;?\275y\3211<\241\207?\274sN\265\274FV\370\273\263\177\306\273\207dG\274\207r\257<7\2325=\373X\352;c*\002\274\322\247J\274\3679\r\274\213\212\037\274\2011S\274^\007\361;>\231f\274\262T\036=\216\264\224=\254\016\010<\\\035\030;\343\275\246\273\361\201\250\273}Hx\275`@\\<=;\\<O\251\004\275\001\324\204\272\275~\337:\251\2175\274\2100\345;\326r\375<|\314\227\274:/h\273o\266\332<-\223z\272jW\212\273\037\201\333\273\020\247b\273GH[\275\232 g\274\022\000/=\244z\260<h\000Y=\337~\210<\310\276\333<\003\277\024\275\371 
\r<\007\\\034\275\351\202\306;\357\215Y\275MHy=c\325\255<\\\000\017=\253\3026\274!s\227<\232\344\201\270Y\362%\275\2327\230\274\032\033\266\273\342QH\275\321\200y<fn\243\272\263\256R=d\332\224\274Hn;\273\215X\003<\261(G\274\370\361\221\272MN\345\273\002J\014\275\\dR\274_\211\006=\255\3326\275\315\032\016=$O\212<\006c\244\274\241\316n=VS\307\272\301\325\034=>\200\233<n\240\220\274\025\307\330\274\2571)=\326\361v<p\302&\274\337d6=\220\330\034\275\310P:\274\220\370\233=\217SE<@&`\274\031\2371<*\213Y<S\003\006<\217d3=|\330\257\274l\347\220\274\024\013\014<\223\313\202\275\265\330W<7|\244\272\202`!=\313\353\206=\325\270\275\274\"\250\236\273\345\217\234<\236\313\237\271\305\032\013=\266\254\224<:\327\215<M\336\030;U\317b=P\270\000\275\364y\250<5b\273<\017\207\347\272\273\242(\274\263]E<#r\355\273\272|\031\275aS\245<\360y]\273YL=\274\324=\221\275\376\357\001\272\231q}:\221R\n\275q\236\202;\355\373\037\273\327\265\036\274\227\322\257<\213\302\t;\212\240\002=\367\301\303\274\305E\305\273MZ/=\351\251\022\274o\244\r<^\023\224<\376\323C\275\002\360@\273\331Q\030=\360Y\371\274_e$;:\327b\274\304\227\210\274~e\331<\361*\220\273\32045\274.\003k=\022\333\2169\034\237\204;\265\347\030<\324\356\251\273\2634\021\274\327\324$=a\254\305\273\223\014T<GR\005=S\343&\273p\231\216\273\027>\230\274h\332{\274\333\303\254\274\361?\213\273\213\277\000\273Q\213\024=\250\340==\027N\333\273\024a\226<\266\240p\273\231I\320<u-8\274\234C0\275\345\217\211<]\311\003\275\275\177\217<\226W?=\313\225\214;:\345\036=\305\311\373:\205]8\274\t\316y=\331\356&<K\000P\275\334x\202<\2309t<Nk\014\275D\274p\274+(\177\273\256\210Z\274\2567\274<\366e\361\274\347\313k\275\203\257\353;b\342\337\273bS\031<\243<\243;\033H\357\2748\312\202<q\334v<w\222\326;{7\240\273}K{<\243\237\204=\361\351]\2759#\266\273\362J\357;A\214}\274\230|H\274[E\207<h\220k\274`\323+:\264\345O\275\354\006\263<\006\254_\274\221\344\206<\307\242\021\274x\245\236\273\030H\024\275\203\261\352;x\247\323\274\351\231\023<\253v\355<\030*R<ae\217\273N{\004=\363e\260<\232A\357;\321 
\273<\322\024E<\272)\213<\363`1=d\256\335\274x\n\363\274\355\213\317;\203\r\250;\253\365\377\273\232\323A<\200\227f=\025\352\367<\340\020\210\274\311<m<~\343\327\274\272\213\234<<\232\024\273-\307R\275\002e\031<\357i!=\023\034\223\272\003(:<P|-\275},\347\274\232\200f\272HX\360\273\200\305\220\274e\257\265:\223\342\000<kI\227\2758\326\036\275\212\311^\273\343\375\266\274\\\353\035\274\033\334\306\274\2115\002=\306\206\006=9\'\233\275\210\210\246\274a\223\366<\225\225\273\275\355\037;\274\346\235Z\275z\201o\273&\304W=(!M=!O\272\274u\007\030;\307Nx\274w\036\367\274\245\374\260<\364+-=\336\303\246<\020\245\252<\337\3766=\211\026\350;\220\210\003<\363\265\210=\267\314\215\274\017\370\225\273\344w\024\275$\226\317<\310U\257\274\3018\307<\010\270x9\211\356\377\273\375\035\346;H\0167=\312\303\216<\215\347p<\260M\232\273\006\307\377<\036q\034\274\252\235\256<hSO=8\323?=dk};\314\261n\274\267\374\006;\320\333\374\273\215\323%=\347\rT\275DF\022\275\367\025?;h\2427\275\337)\325;\202\013`\274\025\302d\275{\377\000<\3177%=r\216\242;\005\250\033=\315\201\251<\005\031\221<\360v\207\274Q\313\261\273\245\211=\273;]\214\274\355\257\276<f\344B;by\036\273\240`\242\273c\013a=4\210!;\202L\323<nrv<\"\355\024<\263\232,\273\034\272\264\274\325;\244:\260\303\216;\304F\033=Y\325\327:-\263\222<\305\337\341\273\271\362\303\273\275\314\000\275\352e\326\273\341\215\214=\200;\206\274\'x\304;&\203\024\274f\330\321\274\014@\025\274\r\371\032\274\035G\317\274\257\245\014<DM\355\273\262c\"\274/\342\303\273\234\300/;\2505C;x)@\275I\317!\275\213#\007\274\262N\361\274\315\311\022\2757`\002;oZ8<\232\274\276\274\224\252@=\002\340\255\274o_\2559\215~\272\2734Hs\275\230\202\230<\n(\373\274-\tG\273\216\273\004\274blK\274,\315\004=N\212\246\273g\362/\273J\351|\275\363\3575;m\270\303\274\300\271\223\273!\nK=#L\337<\277\215\t\275+\023\305\274\\t\333\274f>\027\274\317\303+=p\037\"=A-\246<\006\t\331;\2431\036\275\335hm\275`-q\273/\030\010\273\375\237V;M\002G\275\200\033\024<\276\266\340\272\306\331<\274\\2Z=\017k\036\275n7\377\273\3437S<>\247\345\274=\312\335;e\034\235\274\254p7\275\373{\277\274\257hE;\226\342\322\273\023\316%\274\256\214\035\275\272\347U\275\205\235\'\275M\230\211\274?\302\352;\032]\257:\242mb=jp\226\274K\325\310\274\211\336*\275\2130\347\273\334\035J\274\321N\212;\373*\234<\216\360\226\274\373\241,=\016e <\331\253\242=\364\301}< \245\200\275vy`\275\t#\010<\024X7\275\335a\256=%\273\232\274\314\376\202<\r=\254\275\260\367\336;7\252\274;\"^\366<\355 
\267\274d\224\241;\373\235/\274\254\354a\274\345\350-\274w\363\r\273\340\201\271\273\313\004\204=\337O\233<0\351\024\274@\345\014=\373CD<\214\270D=\310\322\"\270\341\336_\275>\310\n<\224(\027\275K\206\247\274jVG:|Q\243<j3G<\320\221\260<c&>=\274O:=\320oP\274j\357\246<\355\027\214\274\007_/\272\364c\267<X&\022\275\355r\322\274\333\005\362\273k\247\341<-b\315;}\225\022=It\014=,R\000=\272\324\356\274\3401T<%\340S\274\311^\353\273\257\017\004=\034^\203\275%\356,\272\n\215><\34756\274_;<\274\000\234\254\274w8\270<\232\313}\274\367>\254;-\224G=\017\343-\274\020\314\360;N\357\266\271\234\302s\271\2122\345<J4S<.\024\';;\322\222;\235;\010\275\346\363\037<\242Y\227;\355\247\t\275\036\201$<\354\0341;\255F\241\273\364\311?\274\371\267\017\275\362\2142=\030CV<\243\252H<s\310\333<[\315\256<~\032\244\274\033Ff\275\235\344%=Y(&\275\274\311\243\273\237\302\314\274\n\324#\273\2140\253<\322?\037;\360+8\273s\001\331=\246\221\023\275J\275\207<\367\335\254<\315\370\020\273\ns\244\274\331\332\201<\274\303F\274\007\276h\273\nq\301\273\346\362\017=Z!.;T)6=\320\271\022\273Mt\225<a\266h<\313\231F<\014v\035;\000^\200\273\357-,\275\036pE<\253c\330\274\2246f<\225\257r={\030\220<\227U\231\274\021\\$\273$\032\010\275\032d\262<xyu\275\006M\n;\002\036!;\213\357\374;\313\206\322<\205\247\277<z)\027\275\243\2740<\253\177};\361\201v\273\362`\221\273\207\222\223\274\213u\250:\265\265\313\273\036\024\371\273c\000\245<q<v\274\025\224,\273I\032\312\274\003\226\201\274\024\361\214<\027\267\245\273\314>\026\275rl3=\224\353\344<-\250\252\272X\026\361\274\344#\003\275{\274\215<\025\261\327\274\227/\346<=\004\014\275\3609|<\245I\321<\261~\022<\021g\236\2720\336\207=\276]\275\274m\ro=\001\312f<@\026\371\274$\036\254<[F \274\025\254\335\273?o(\275C_\025<\355\236K\272\346Y\321<\305\234K\274F\026b=\177 (<\276\262\037\274\371\256_\275\221\341o;\331mf:v\nC<x\007\t=L+l<\227\323\010\275\251\343\271\274\006t+\274\303\305\371<\007\354e<\260\362\235\274\237\252\324<t\202\362\274vo\306\272\325\216%<L\2541=\022\250\335\273\343\200\311<\261\266\005=Bz\370\273=\353\304<p\032\260\274\346\030*=M\010[=\267\235\013\274\014&\222\274\002%o;\240O\214\274\315\\\217\274\362\246\016<\373\345\375\274\340\321\002=(\256A=Ai==\003\201\337;C.C\275\016\352\014\275;\277\347<0e\177\274>\331\235\274!l\267\274\334G\255\274\227\001\346<K\256\225\2721ZZ=\315\225\270<\245\220R;\302\"\204;4\025\366<\323\321\"=\177\303\253\273q\261\360;\370\006\027\273\325*,;\310-c=\261\t\353\274\031\357a=\356\016*=\227\177\306\274\014\253\"\273|\216w=d\020Y\275E\002g=\356\363\247\271\334\333\212\271\024\233F:]\347\315<\243\177\353\273\322\363\003=\372xD=\207\277\376\274\225\377\016=)G,\275\013#Z<\2101:\275\264\373G\2748\242\007\274z[\030\275\021+\371;6\355\000\275\3168\236=K`\034=\252\211\023\273\332\213\351<\2749\224;\202\022_\273m\276h<$i\016\275\014JY;\274\323\t\275H\206g;^-\255\273\260\364\020<]\257\204<o~\337\274Y\210 \274\001\322-\274\300\207\362\273\241\363\200<\205\206:\275\326\2749\274l\226\205\273i\222J\275\321~C\274\014\366\t\275M[\347\274@\323d<\340\245\350:e\013\366<Wm \275\366\373\214<\204S\333\2746b\315\274\235\201\345<\021\213\231:\222_K\275\203\264\255\274\"X\262;\036\265\255<\2156E\275\257\260\017\273\177G\206\274\204xs;\013k\344\272\267\003\267<f\241\001\273X\355\234\273\304\372M\275\01794\274\272\223\335;\003\304\272\275}\255\261<\026-&\275\225/\215\274\177.\315<p\200\373<4\370F=k\335\200<.\326\n<8\2679<>\003{<&\003\036<\251\233\0319D\020*=.\327\223\274@\272z=\2116\257<\314Jo<\237^\355\274\260<=\275 
\2359=\223\0207=\343\262\306;\277\204b\273\243O\310\274\"\0210\275N-\207<\r.\267:\004\276\204\274\377\265t<\314^\206=\277-\003=8\031\021\274|\301S=\370Uh\275j\0101\274K\\\204=\005k,=\315\\\236\275\031?\206\274\215\255\027\275\010l\357;\303\230\250\273\350:X=\310\2507;\300wS\273\375O\266\273\317\352U=\336:\"<\246\211i\274\222\030\251<\250,\217\274\211\270\017\274\000\224\255\274j\213\347\2744\306\353\274j\354\201<\323\317\263\274\370L\226\274D\264\325<\375|\335<;b\346\274\334j\034<V\303\215;\214\341\252<\0225F\274$Mu\274R\034\277<:\337\003\275\253\316\350<\221\361b\274\262\252\020<\227\224Z=\224J\004=\315\342\213;\270\317\216<l\213\312;\201\251`\274\034C\300\273q\343\221\275WQ+=T\312\207=G-!\275x=\224;\240\361\237\274\307\020\027\275\017^\022;\340]\016<\324\355R<9\320;=x3\347<H\352\214\274\251\216\213\274oJ@\273\372\204\242;rj\231\274\317dk=\237D}=\"\370(\274\364h\177<KL\300<\332T\331=\240\356\311\273i=\r\274W\247\002<\250 \200;4w\200\274Cc\020<\021\024\241\273\327\364(\273\247\325\014=H\277\242\273\032\224\217\274\266\016\335<\215k7\274\325PT\274=4\000=\277\2315\274\317Q\334\274\327\003\237\274\332\025\352<\340\247&\272\256\273^\274\034.\315=NX5\274\215\353\345\273lV1;\330\025[98I;;g\351M=k\272!\2734\226A=Z\321\302\271\345\263\023<U\331\013:`Ug<_\250\2229W\014\317;\214R5\275.\275w\275fq\225\273?\214\336\274\346Y+=\337v\347<\337D\227<\263a\004<\335^\227:\034\251k<\031j\374\273\247j\\<&\273q\273\351Z\236\275\026\006\270<\007\r\003\275\302\002\354<\034]\201=\207\237i\274\325L\273\273v\232\276:-\242\027\275\270-\236<z\232\362;\331M6=_G\025<Lh\013<Zme;\224\276\000=\374FJ<\337\264\207\274\303\232\016:\203\2341\275<\362\036<6m4<\351\037\205\275g\344\321<\341\321;;\205W\214\273\250u\304<\222\024\201\274\240%\255;\332m4\275j+x\274Y\032r<T\031\271<G\204\017\2756\243\217\274\344\335\321\274F[\035=I\313\260;\333i\234\273\221\302\252\273\3220|\273\3170\251<\354\211\252<\304\022^\274y\357\366\274\0215\372\274\002t\264<\373\177~<3\025\355\273<\264\017=\307c\177;\343_\216<~\326\030\2732\024\234<\263\007\337\274\347\023\305;X\233R\275#\242\036\272\371\252\232\275\327\356\2459C\236\013\275T\326\220=i\302F<}\273\207\272v\367F=ss\334\274\027\366\021\274\014p\233<!V,=\312\177\374\274gCM\274\354\247\325\2744\272\225;\370_\275\274\375i\200;WT\204=,&\002\275\357`_\275\316X\025\275\000\217P=\271T\330<K+\327<a\020\201\274\241h\250=\270<{<X\342J<\t2S=1\221\311\275\026J\014=\tn\006\2747\242i;z`\242=P\335~<}\314\243\275r=)\274\316\303\347\272\303/\211<c\231$\274\221B\211\267N\311\021\275\341c\262\274\217\205c<\255t0\275\333\371\271;)\363\210\275\205G4\273KA\t<\"q\036\274*\275\376<c\345\273<\213\343\021:\035\031\300<xP\246\273\021\350R:\016gl\274\333\243\234\274\340\255\010=\213@`<\327\237\261\273\334s\0139\227\322Z=Z\230\032\274\006B,=\222G\211\274\356Pt<\030\007H=\020\234[=\374\324q\273\005!c\275\".\356\274\306Q\212\274\276\331\235\274ZC;<+\264\213<\035[\217\275\232s\000<\206&\367<\030]%<Q3@<\375\240\264\274\240 
\356\273\367\342\253\274\013\351\002\275\350W\214<c=s\274[(\210<\251Ji\272\264\034&<5\211\021=\216j\217<\200\3173\275\373\323y\274F$d\275\257\223?;0\236\266=\363+`;\247\007\213;\244\341\316<\276\327\245\274\247W\016=\037\213\317\273\014P\356\274\274K\030\273\nU\010<\225\004\267\275\340\256\306\273\353\006\321<\324\236\005=\274(3\274\017\251\177:\257\212\374\273p\322\240\271\223\377\017\275\352M9<1E\360<\036%\022<(\265\301\274!\336)\275\233\362T<~\2538=\370t\237\274\\\323\t\274\307\216\036\274\303\037\220;~\237\303<=\032\302\274\264\007\376<q\376\227\273?d\375\274\000Lv\275\254\014\343\274\232\216\301\2736\\\006=@\333\006\275\030\207l;\215@r\274K\335\307\274\221\032\320\273J\327\223<\313\365\002\274\244[\262=\230\203\216\274\213Nr=\237\363\"=\200Y ;\373\245j=x\r\233<(\222\n\275G\247\213\274)\005\365\2732d\333\273\303L\377;^<=\274\016\344<=)EK\274\220\321\227\275y5x<zyg;%\277\007\275\270\023\364<:V\036\275\304\377\357<JJ\317\274\006.\247\271!\nT<\034\0327<C?\263\274\261\220\315\274\002\365\030=m\321.\274P\246B\275\237s\2308g\224H<\321u\216\274o\3315<\226\233J<\226\002a\275\031\022\242;2@\334<AZ\353<b\304\204\274\326nn;\357x\221<\347\364\024\274\013\237\222;k\244\214\274\217\240A\274\023Pt<\275\034\217=\335\367E\273R5\232<Z?\275\273\037\337\000\273\254\253\311\273\267\345\313\274\034z0\274\312\213\314=\212q\346\273vM2=\240\244\034\274w&A\275\251\232\215\275\010\374\002<\361\266\026=v^\027\275\034\356g<\340\035\253\274\212\237\277=\341+)=\353\265N\275\203\370\333\274\264\331\270\273\374\207\n=v\300N\274)q\344<l\211\236;\330\264\031\275 \017\031<\275 \245<}\004\267<!5\374\273D\234\365\272c\252z=m\375Z\275\030\262\'\273\322\n~\2746|A:\237\333\254\274\227j\220\270@\237;\275&\233\210\274W~d\275\202\005\276\275\r\353?:Z\362\375;\221\277:\274~A\027\275\201\217T<|Lf\275\226\002\242<\226\322\n;\025\354\211=2z\305<\373C*\273\300\3069<Ky\032<D\254\265<iw\304\274W)P\275\315\276X\274G*a=m\210t=l\n\236\274\211\260%=u\235\335;\017=\267\274\235\236\020;\002\207g\275\010>\353\274\332\224\213;a?\305=\246/\204\274\247/F;\267;=<w\216%=qP\251<\030\242\025=\357\363\206;G\254\021\274\260\373!\275,z\234\274\205\261\245\274x\223\364\273\275\231\037\273K\373S\274\327\006K\274\344r\352\274\347[\n=\332<\221\274\002g\211\274\004\270;=\200U\000<\273\305\017:B\202\313<\266;b;4E\202;Hb\030\274\031?\303\271_\024\253<S\206\251;vO\236\274V\300\034<\373\225D=\215\211\204<\034\032\200\274\363\357\222\274&2\204<\376*+\275N\3471\274m\361\330<W\034\003\275\201\366g:\305Y\335\273\267\371\262;\231\304~\274x\313\341<\325\212\024=l\220\333\273\304\247\033\274(%\277\273\017\207\325<\007\327\316=\277r\232\275\005;\206<\003\274\257<\0209\252<4^\362\274\354f\255\270\246\374\027=\322\202\217:\240\257\036\274J4\300< $\350\274\036\001?\274B\032\244\273\362(+=\343\350Q<\350\030\026=\013u\3119q\0047\274-.\271\273\361\325\273;O\333\356\274\364\t^=C\311 
=\213\311\024=\240g\355;\307\023\227\273\361)\001\274\331pK=\236\330\210<>\231\321\274\200\342\365\274\303\007\326\274\227\'\225<\235<,<B{\362<\2421\006=\261\025\n=\201\374\261\272\335c\006=\232[1<\332(\017\274\373\272@<\251&\206<\214\244\275\274$\2461\273A\"2;\357L\362;C\357\273<\312\337\244;\001\032\021:\260%\326;p0i:\322U\207;\020\306\260<\206\265\357<\220r\033\275\273\205\024<\301\304c\272\376u\013\275\263e}=\216/\001<\365\360E=a\021/\275\014\260\212\274H,\271\274R\014\257</\243\276:\027\226c\274\264\025a\274\031\310?\274\376\306\025\275\216y\003;\2065M\273\216\270\275\274\'a\026:\304b\370\273\001\2251\275c\215\031<\322OM=\370\237\\=w\341\274\273\375\246\014\274_\230@\2751m\201<\341\3508\272\362\3449\275n\257\227\273<O\332<\031[\373\273\353\363]\275k\264\247\274f\341x<R\377B=\235\365\311<\302\251\010=\252K\204=\344\331\374\274\302\331\213<3\214\025\275\304\243\016<Es9\275\\\nb\274q \314<\260\243z<\223\242\222\271\326:\301\274\210\026k<G\321\371;\263\360\237\274\315\003\303;wR\221\274\251\021\320<\024Gp\275PM\244\272{\3317=o\nJ\274`\315\230\274\266\274\3547\225\345l;\324;K=\353\273C\275\246\204g\274\277\251\210\274g\252\371\2738\253\261\274\244\274\014<\2150Q\274\224} \275\204\312\021=x\303.\272z\022\237;\006+~:\225\240\326;\213BP\275\251\337k\275\300\224\272:fqt\272\rJ!\275(M\321\274\t_\3779c\330[=\270\367\361\274\263\342\336;e\264\315<C\031\020=\203\251\032\2756\305\225\272P\227\203\273\326\220\030\275\204\212\334<\n\010\217=\037\261P\274/\265a<\2268\231\275\312b\005=!\006\342<\0004\326<i\326\367\274IjE<\252\372\021\275\345\364\"\275\032\236\244<\261\315\325\274\014\r\254\273\374\217\215\274+\320\302<|,\034=)\236\r;w\360h<W\252\370<h,\215<*G1<\215\377\265<\370\363\274\274\214\001\222\274\025\300\036\275A\2409\274\312\201\275\274\234\343[\27385\346\274Y\316\006=\217bk=X\251\033<\\k\3027\302}\352\274\300\327;=\361\347T;.J+<\307?\217\274\344L\211<\304\254\354\274\366\206::s\362\n\275\313G\223\274\247|\203\274\216.p=\017\357J\275\007F\344\274\264\205}\273D\354\372\273\033\227\265\274\013_6=\035\231\005=\351r\357;\356\025\231<\027o\275\274\006\343+<\335\267t\274\340\021B=u\"Q\275(^W\275W}\340;\3534\341\274\216W\245\274\036\252\323\273\027\345l;\214y\314<eY\202\273G\235\262;\250\335^\274\272Z\031=\3223\024\275\003<|\274\000I6\274~\321\217\274C0n\2756\314\246\274\254PS=\0255\230<\363\031\350;\341\341\261\2746\311\362<\367\221\0009\037B\323\274\262\014W<d\325\306<~8\271\275\246\344\204\274\225$\024\274c\302\337\274\002|0\275\332\355\227\273\022@\356<\271E\275\273\344\303 =\243\271\033=|\275*\274\334|\235=\210(F\275\374\371\310\272\005L\325;\372J\250\273\013\251\002=/O\033\274\267A+=\025\221f=}\000\361;\233\211@\274\344\342\272\274tE|\275q?K=\353\374><H\205\317\274R\310b<\021\005\247:e\371\311\273\327\247\213\274\250\361V\272z\036\245\274\234+\247\274\310bW=\233\014\237<\207\231\000<1\177\203\275$~w9\367j\357\274\252&u<\333\275K\275=o\000\275$\307\243;\353\275\254<\303\250\327<\226<\274\274\301\016\315<J\304\215<\207\003\270<N\010\371;\364\"\206=\016\361B\274\001)\352\2742i\000=\301\275Q=\023\250\t\275\010\273\374\273\243\004\211\274\030 
\214=\272\0337\274\357O\257;\366\027\362\274\r\274\035=}~\374\272\364Oc\272Ay{\274<3\t\2751\310\004=\337\345\302\273\300:\225<\366\264\224<\363\2322<\272`\035\272)\002\335<4\332B:\216\351\302;\356*^=\'\343\262\273\304\0144=\234\256c<\322\'$\275\314\034j;\013\253\203\274\206\354#<\265j1<l\260.=\265,:=\2178\321\274\345!\324\273\337\344\234\274EV\362\274\034:\371;\022u\364\274=o\211<e\317\355;\036\263\206;r\227\301\273c7\260\273\310\232\020\274$\'\177=<\361\337<\243m\376<DT\306\274\317(\343=u\207j\274^1s<\333\374-<\220\360\177\275\017\315\260=\353\233}<l\3219\275\254\375\260\273`tD=1~\245\274kx\240;\337\241\005\274\274\350\001=c\335\241;\226\3278:\000\315n\274k/\215\274P\206\245\274\343\236\205\275\230\276w\274\325~\207\275\270\341\304\274\023\272X;\006\321\347\274^n\031\275\253Y\r\274\357P\317\274\036\267h\274\005\030u\274\032&E\2742\013\236\273)\355\201\275nw\202\274\216\204\013\274\\\371\3428\314\327\010<\321V\230\273i\021}\275\325\231\036\274\236\3027\275s\017^\275\327\2376<\333\354\207<?\266\203\274\346\014\260;a\266Q<<\225\037\273\364\343U<\253J\233<\256\313b\274f/\022\275\203\007\247\274\274\311@<\346\213:=Pq`\274Z\035\347\272Wh\303\273@\010,\275Sa$\273\343}\341\273\226\275\244\274^10\275\2601\352<w\351\312\274\226\257\017=\001$d\274G\035\313\274\033^\024<\005\3303:\177\013\310\273\377\032P\2749\273/=w\251\220\274\006\3567\274\210\033\036<\267\355\377;FC\032\275\005\360\323<\225\343\352=\364(-;\220\310\267;^\246\272\274:\216\362\273\213\254r\272\235\367\303<x\033\320\274#mu\275\230\256#=\333J\224\274\223^\004<\234E\206;:\277\"\275-J\226\274\330\220\327\273\337w<\275\n-\254<\360\235\213<\005fJ\274\314\0315=\342/\326<~\370\345\274\372\370\252\275\334!\n\275\342\2477\274\376\310H=\333\365\036\274E\264\204\274\361\207 \270\313\253\017<\213\346\312\274<f\242=\210\024\367\274\37457\272\362\256\300\274\303.\320\274\205\023#<|\367\"=\242+\274=h\347\247\274}\nC\273\227\354\233<g\326b=7&\026\275\344\372\206\274\271$\034=\355\0275\274\337\367\013\275qOe<\327l\236\272\345.4=\025(|\275\265\373\r\275C\313%\275\357\347x<Ls\377\273l\245x<\302\204\\<\034\235\206\274v\240M\275\2031\t=\320\326(\273\201\212\275\271\215{\003\275\256\2366<\t\223=\274\313\023\220<\331\2215\275\230\001h<\233R\021\272\253\t\366:\023)\017\274\022\222\255\271\026\241B\274\346\023\032\275\260\342E<f\214\332\272|\371\037;\376\356\312\274\373\344m\274\365G\343\274c`\271\274\362\346\007=\320\266T\275v \266<\037\267r<\215\301\273\273\022\356R\274\221w\003\275D\350\355\274\354`v\2740\335\226\274\2015+=\014\223\235\274\016\020m;5o\374\274\007\376/=8M\001=\265\005\002\275S\310\007:J\3764\274_\322&:f\204\334;\r\367V<\' 
\023=B\270\215<\352L\240<x\371\244\274\305\301\317\274\226\343\377\274\254\336k\274\020\021\211\274\316\353\335<e\215-\275\377\341\227\272\270m\340<\035\212\240;$p\312\273\227\350\351<^\201\350:\rH\020\274\355\203\202<\261k\321<\200\370\220<\252\251\360<\326y\211<\364i\356:\376)\252\274\323C\346:\021\326\204\274\033\300I\274\262)\'\273,\334\374<8\323\006\273\246\003\325\273D\033H\275\007y\310\2742\323\233\274_\327\232<\270\357\314:>\032;\274m\032\177<\373z\026<$\367\335\273\032\374\266\273\2317\206\274\336/\340\273}\004h\273J\351\375\274\330\302s\274\005\360G<Rn7\274\3549\215\274\315/\207<\"{\303;\356\277\007\274\225\214\t=9\326.\274\247\230\356<\223\321M<\205\013\020=G\00349\223\324\363\273\371s\027\274\013\370\3416\267\342\337\274m\212\304\274\336\201=<\t\314/<`\332U<\236\216d;!\212\317\274\253\201S\273\020k!<\014\305\314<\242\230\232\274.\231\031<s\027\217=\375\200\227;\341\002*\275u\332\016\275e\247\032=\033\013\007<[\306\257\275u\022\025=\342&\007=\370H\316\273\254\023s=&W0\275\021\264\376</\034\373<\177\014\036\275Q?\361\273\226\321\211\274!\346I\274\313]\233<\314\222C\274*\006\262=c\033;=\316\236]\274\303b\324\274w\223\232\273\240\022Q\275\362\007\224<\276\225\007=c\371\024\273\016\267w\275\274-\204<\357 j\275\361su\274$`\351\274@`\016=\210\252\003\274\322\n\310<\334\337L\272\226\252\270<YB\202=k\244\365<w.\334\274\373\031u\273\336\302g<\360\312\312<\351\2250=\024\022\203\275\253\317\241\275\343\233\220\274\232e\232=E\317\t\275\"\210!=P\236\342\274\366\302\005=\025\261F\272\254\210\234\274E\014]\273d\037\217\274\364\234\277\274K\261E;\261\303C=3IT=]\326\036<\337\215\002\275\314\203\352<\017\337I\274\341`E<\243\231\214\273j:\260<\370\224\022\275\243\326/<u\303a<\361~1=\243\225\2518\254X\221\274\333\227\030=F\334{=\310vE\274b\017\257\275#\271\233\274\205\322\177\274\351\312\303;\224J\224<\364\r\227\274GR\026\2749`\312\274\230\201\020\275WJ\374\274d\342\252<\370\210\n\275\005\016\264<l\265\003<N\352\327\274O\305\212<Yh\023=\272\005r=\371\016\211\274$d\373:\002\354\213\274U[\301\274\212\326\207<7\305\032<\002\255#\275\326\\\245\273r\r]<\264M\013\275\210k\";\3041.\274\323\3274\2747)\354:x\360!<?o\264\274\374\302\310\273\333\016\256\274\355\213\000\275{\315\210\274\326\253\322;\316\235\245\274\217$F<.z\334<\342\2253=R\300\324\273\3524\\\274\257\tB;\261\206\221;l\204a\274\2073\211\274\350D\377\274\"\224\333;\275%\231<B\276\201=\3041\317<\375\263\211;\362\n\221:\341/\301<&\034\036\274\245P\016\274\316\314\370;a\300\340;\320{\300\270\023_\"=@N\214;5\004Z=\354X\351\274\367s\003=\230\223\363;\341\271O\273\202]\003\275\306\226j\274\033\351u\274\303R\206\275\215>\253\274\266C\246<? 
\232\274\343V%\272\220\212\275\274\251\230G<}\314\260<\366P\215<W\366\243\272,j\235:=\220\341\2737\371\200\274\332I\217\275B\0263\275\013u\307<\343\234\377\274\211M\323<\250=\236=r\262;\274G@\347\273\305\357j<V\276\353\273#X \275\356 `<\200\361\341;q\020\356;!\360\323\274\3625\227<\261\022\323\274\006\306\316\274\'\224\036=!\372\360;\302\262\017\274\277(d\274?,\204=3\363\t\275\224\033\307:T`\005\274\215X\275\274\331\243$\274\345\277\200=w\035\013<OV\315<l\'\262\274R)J\271p\r.\274C\252\232:gx\362\274\005\325\274\274\364\243\370<\230\255=\274h-\341;\210\022\275;j\\*=\250\335\005=\264O\217<\026\336G\272\311\253\311\272-\3742=\306\033\272;\023q`\274!~\221\274\265\310\267\272\334\366\017=OF_\275\024I-\272W\014\315\274\201\3417=\346\2655<\362\035\206\274d\265\325<\246m\266\274\022\232\037=\354$]\274\000g\376\273}\365\333\274\021\001\035\273\220\265\256<\347\233\267\274\223;\013=mY\274<\354\353\n\275\271\247\307\274j\266\340<$\276^\274\rJ\234<\320\013I\274\023\203\317\274/\300\217<\205w\243\274go\370< \355W\273mY\324;\265$}\273_\343\n\275G\033K<aM\303<\024\n\345\274\356\025E\274S\003\214\274b\351\217\274\2646\225\274\200\206{=\3467\302\273G\357\023<\367\026\220:\217:\263\274IfP=R\343\355<\260\260\324\274\305Z5\274.&\250\274f+\017\274\036c\364;\340\3663<\371\245G=\3024\376\274#y\275<\263\231y<\030\220U\275?v\'\274\"\004\226\2743\206\310<v\022\004=\220K\235\274F<=\275w\264@<\026\250\331\274\352\213F<2\007W\274\303i\331\274\332\316M\274\221\037\360;\246\t\331<\006c\265\271\276\226\031\275f\267\000=\245\010\213;\'\347\246\274\031<\312;\017U\277;\022\025\373<\366\372\000\275\375\355z\274\2279\027\274m\230~9\247@\337<f\277$\273\266\005\244\2721\212D<\027\355\343<\001\377\035\274\330\204\243<\215\301\002\274\270\362\237<\256R\252<\301\341U\274#q#<\205\030\264<\243\325v\271\242yi\274\313e\226\273:\340\250\274\001\1770=\375\311c<H\360\217<\016!\205\274n\342\246\272Jj\277<w29<\001\270\021\275g\000H\275\307\201\256\274y\345\225\274\037\377\221\273r\357\226;6\267\370\273\214\025\242\274\367k\024=\361\306\017\274\354\205\036\275\250B\361<\310\303\223\274\267p\\\274\224\327\001\275\023J3=o\332F\275J\326!=\216\310\006=3E@\275W\\\276\274\366}\277\274m_\007\275U\004\327:Y7\270\274\026n\023\2746_>=\222\200\226\274\230sB\275\251\361\256\274\223\014\364\274c\361\001=GK\006\275phZ=\233=\370<4[4<A\217\227\267\235\325\366<\254q\010;d\034\006\275FHA\275\345\324\204<\241K\312\274\017\257\212;\201\260\033\270\236\207Y\274\234\233|\274\337\246\022\275o!T=b,w:\370s\231\274`\027A\272\347\270+<7\267\270\273(@(\275{\324\r=\247je=\232\365\002=Y*\300\274A\260\306\273@@\256<\245}><\245\357\202\274\276\314\037=t\212\246\274\240\2222=+f\026=\000$\264<\251\301\022=\301Ip\274Q\304\264\274?\340\004\2742\217\363<\347vQ\273\337\2742\274\020\274F<Cj-\274?\210\252<O\321#;7[\271\274\3400P=\324f\001\275\305k\031\275\203\'l<\215G\270\274\300\031\242<V\360|\274\034x\320:\217\206L\274\020\364\027\274\267?\010\275\225\2344\272\301\251\247:\313\323\352\274\245\"\207\2731F\225<\r\361\207\274\375,\311\272w\3735=\325#\t<l\022\202;\351\247\004<\024$\363\274O\222\242\275\274\211\256<\246\372\340;\3765\030\274_\224e\274\314TO<-\234\252=\344\310_<\003\257\333<\202m\177:\t\240\001=p\205\347;J&\013=\202O\230=\241\363G\272\216\217\010\275\333\273\"\274s\323\022\275\245)\021\274\370\344\307;f\325]=\rK\024=\355X\227<T\265\034=\t\240\207<\220[\204=\234uN;5\256_\275\314\331L\275\023\321\007\275\274\305\376\274\255\215\006<\2017\032\275P\310\t=S\353q\274\360f\036\274\226\317\256<\370\343+\275\006>\033<d\337)\275;~\t;\330\305\243<&\305C\273\016\332\322;\206z\025
\274\345h\304\272\014-|<\367X\230\275\371`j\273%|\203\275\177\361\270<\320\330\253\274\033\246)\274\'\027\005;\n)\236\274\031\002\265\274\274L8\274\375H\374\272u\355\034\275\320\022\030\274\223Fy\274s\264\350\273\223y\234\274\350\302\022\275\347\020\035<\275Z\244<N-\275\274(\230\236< \224O<\227\264!;\242\311F\273J\251\225<\212\020\004\275Xl\335;?WO<l\036\351\273\242@+\274\233\017\215\274x ^\274s\277\003\275<{\323\274\031\257\n\275\242\300/<\366`\027\273\330\021\330<pD\267\274\224\375\"\275\304}\257\274\314/\312;g\342=;\331\344J<\212\217\236\274\000\251a<\345\221\201<&\310M<\2153\307\273\231\'\375\273\010\013^<\217q\031;\342*\t<\332\267\264\274H\002\265<\370\266a\274L5\245<QD\300\273sM\021\274\376\225\032\275\326\214/\275a~J\274\220?&=\225\314(\275\267\205<\274\315;\251\274\360@\033\274nR\331\274\"\327\367;\331Q\276<&\313W<L-\200\274\320X\305<HUw<\201\004\033\275\022y\031\275\217\035_\275\2567I=\352\n\023\275b\332i\274\327\301\021\275\276\005\255\273\347S\3329t9+\273a\350-;p\205\273\274\204\344\016=\305\221\204;B\222\253\271\216c\320\274L\235\310<\370\227\334<\373\277\245;\357\374\332;\327VL<\374\272\034\275\215\036H<\237\202\272<?\352h\274@\257\313\274\377\222H=&H\300\274O!\301;\210\323g\274\251\263 \2749]_\275j;Z<\226b-\272\037\301\334\271\035\331F<8V9\275#\366\361;\267\004\024\275\306\225\320;\230\022.\275\302\376\303;j\217|\273g\333\267\275\260\006\020;\326r\t\275B\317\000\274\301\206\236\274\205\244u=\203\274P<6\310\017<U\310z\274\013\357\201<\377\021\242\274,\032\247\274t\206\330\274o\032E<\2365)=um\357<\336\276\023:s\\i\272\242\274\345:U8\367\274\242\030\226=S\027F\275\235\217\250\274\220\000%=\321f\212<Q\271\332\272[\263\206\2724\326\302\273\t\\\210\2720\225K\274\023\245F\275&\266\3259\222\010\310\274C\302\"\275\016\343$=bZo\274\001\253\r\275k\2412\2730\367_\274R\242\371\273\325D<\274i\343g=Q\347\363\274\201\\\245<\032\344<\275\251\362\006\275\270\030\255\274\243\033\275\274\021M\262<\364\031\221\274\005\322^<\'\3121<\275\266\r\275m\000\353;\206\371\241;\243&b\274\340\244\006=q\031\t<\033\300G\273\323a\214<\rJm\274z\343T\274P\366\000\275\271\232{\274it@\274\343\3073<\346\325\267\274\315\276\375\274\017\035\372\273\202\220\277\274\"s\247<\001\225\214\274-\264\227\274C\310\0059\327\326\215\274B\246\266<\316\235\'\274p\272a\272\365\377\245\274Y\332,\275\271\3702\273\003\307[9\353\243\025;\374\223\202<c\363\377<6\225\'\274\362m\206;\316\n\341\274\270<\250<\327J\203\274\343\323\010\275\2702-\274\246\240\014=\331\362\037\275\235\237\345\274\200\030v<<\3258;\236\222\276<\253\345\'\274\277L\347;\363\340\246\274\375R\231;\223\324\n\275\226\2370<\252\211L\274\016t\354;dM\260\273\244N4<D\316\007=\034\361\250\2740v\252\272s\352!\274\3063\367\274\200\344\263\274\212\rN<\214\031\365\274\317\336\243\274\364]\224;.\351\346\274C\017\3469\206z\350;\233\244$\274q\274\317\274\332w\222\274\026\337\272:/A\026\275,\303\274\274\231\300y<\315\t(\274\235\305M\274T\354\030<\027\3359=\311\352/=\347\227\264\274\313\177\021<\256\003\363\274\2231h\275\270#\207<\0222\200\273@&1;,\237\253\274\227\0365\274o\342\021\275\031\270\346;\026\317\265\274M\267\206\274\034\232\344\274d\317(=\021\322\353\273aj-\275\340tu\275.\214\266\273}\001k=#\021Z<\303\2162\274\222\002\023;\276`k:\342\"\351\273\301T\203;\306j\250<\016Z~<\3763\360<\0366\262\273\020\323.\275\003\220\206\2742\026I\274\024%\212;\327\214\213<\217*\220<\217,_\274\202\220\231\274\253\204\247\275\372\004\262:\330G/=\343xG\274(\341\202\274\214\362\017<\035\355\251<\342\003\263<\260\305\371\272O`G\275\252]\314;\313J\010<\362q(\275\251\245\255\274\n\363\223<}{t\275\24
5\3006\275\2117\316\273s\004\205\273\207F\364\273\3766\325;\334\224\274<TX\210<\264_\326\273\313\375\357\274\350l\351\274\277\344\211<\240\274\223\273!\302\205\274\276\306\361<sm\253<y9\007\274\264k\204<\030\360k<b3&\274\254\371\254<\325\273\222\272\351\374B\275\357q\341<\320\"\213:\356\004\214\272T\001\276:\336\305\366<z4\311\272j\0274=\274h\274\274H\261\237\274fa\020\273\275\356y\272\177[\000\274\312\205G=\325\217\372\274;\332\360;\243\257_\275\322\3366;\267\247b<\205\205O\274\357ZU;\347\335\024=\205\023\001\275H\231\221\274\254\2314\273\222\226\037=\336\007!<w\277\360<I\223\206\274\026&Q\275-n3=\010y\202\275Ix\030\274k\267\013<\031\300\324\274bo\031\274\361\251\"=\347,\006:\344\220\352<>o9\275\220\300$\274\267\275\006=Y`\333\274m-*\2755\306-<\244\351\313\274O\263\246\274\343sn:.\032%=\250t\304\274Cp\221<\340\005\\<3oc\274[\311\r=\377\206\264\272\\\232\365<\032o5\273P[r\274\346\037\215\272t\256\214\275\227\305\010:@Pt\274QH\250\274\227\370\373\274\002--\274\236\343\020\274-\363,\273[\236K\274d=\005\273H\332\350\274~>\335\274(\250W\275\035\277Y<\262\302\217\273tW\271\273\020]\034<\244x\232<\211,;\275\361\311\010;\201\352L\273\001\246Y\275#\241s<9\242\005\274s\03339\003\321\230=\2735\";,\263\233<\223t\303<\322\356\177\274\026\351\350<V\233\212<\031]\007=\216\016l\275\253e\267\273l\273\304;\320^\316\274\315\351\257\274O\231\n=\266\202\257\2745\325\220\274\305\305\222\274\037ey\273a.\264\274\240/\274\2742}\366\274\252\211\314<\013\022P\275\224 m<\316\257\225<hFQ\275\003\256\317\274\370G0;\264\r\211\275\240\264t=\007\217\027=b]#=\354\261\360\273\273v\230<3\252Y<#\373l<\235\376^\275r\236\010:\t\027O=F\332\234\274@\016\360\274\270bz=\324\2544=c/\227\271\273d\216<\273\207\352;G\017\211\274\366\273\256\272\351\234\\<\254.\004\275\373\370\217\274\227G\021<\334m:\274\252\303\210;\022\316\333:\217Ul<m\226\266\273\0221\331\274\254\341\000=\027$)\274_2j<\246\342\201\2752\343\207\272f\014=\274m|t\274\325\365\304;\214\323\356<\236\333\370;,\304K\275\337*\371\274\213\337Y\271\032|V\275\022D\271;\030J\003\275\205\036I<\312!p<\325\377\\\274\263b\017=\037\365\304<\007\026\211\274`J\213=\361v>\272\332\033\257;\036\207\255<\375\216\023\2757\352\374<\301+\252\274.\210\017=S\276\"\274b\023\220\273\'\010\327:\370\002\270\273)\021C\275\031\275E\274O\004\350;\024C \274\223\224\254<\367l\333\2743P\203\273\334qN\274n\034\241=\355[\031=\360\016\223;6\"#<<g\360;\361\353\345<\303Z%\275\366\207J\275%\376\n<\034t\006=\246V\265;\253Fe<8Vl<}\326F\274\351\252\242=p\216\306\274\352\215\004=\031\233$\275\362c\225<\373\273\026=\007\350\213\274\377\271H<\215+k\273W\0104<\020pM\274\022\230\365\273\325m7<u\010\207\274P5\266<QE\337\273\361\264\272\274\263\270\016<\002\203\276<JMG<u%\210=?\0376=\226[\230=\210\3644<77D\275ei\354\274\233[N=Hkj\275\204\367\275\274](+<d\310V\271\233j\372<+~\014\273\330\213 
\274j\347\236\273\200\304\001\275\203\037j<\304@\017\274\325ib\274\013\242|<\331\027q<\304\207u\274\3225\345<\3525v<\016\322\221<h\020\227<P\017\344\273e\370\361\273\001\0167\275\025\376w<\'\263\r=/\353%\275\033\014\216;\344\313\330\273w\304\275;\246+\004=\n\267&\275\265\364`;q\017\211\274\346\034\020\275\337n\221\274\326\265\304<\252:\277:\177\346\352\272\201\315\006\273~\220\231\275\245\366\r\275\037\247\360\274.\341;=\337\n\276\274\263\212\030=F\3734<Y=\023=(\356|8M\265=;f\312e\273\261J\230\274\254[1=\256\313f\275\356U\330\274\221\252\230:~\244[=\324\240?\274+\322\002=M\307\222\275F\221f\275\335g\276\274K\310b\274\026\020\270<\231\363\023=\2253F\275\346M\034<q9\006\274\237\266\270\274\212\375\025\274\275;\305<c-9\275c\264f<Q\\\260\2733Yr\274\003v\320\274n\312`\274\'\320p;/\214\010<\256\017\261:\201\027\302\272\362o\202=\333E\362\273\200e\n<(\\\210<\321V\203\274\333*\371\272,\324\317<\333\342\254<\231\240\230<]\203\373\274\323\247\377;\366$\340\273I.\200\274\242_o<\235W\202;\300\264\324\274\221\np\273\247V\357;L\337\375<*9`\275\232\267\026\274\243\016\n\271U\366\003\275\223;\314\275\314n\014\275\037\351w<X\334\267\274\224\200\265<\362=\244\273?z\327\274fkL\274\236h\226\2748\340\362;\245\3451=\315\234\323\274\324\000\373\274\376u\256\274\242:\352<B\000\014=\257Y\354\274\377\003\027=A\342\305\274\221#\221<7\210\275\273\244\362\340;\n\n\265<\235\r.\273\255$\003=*\\\377;{U\264\274\220\207\375\273\351\273\376\273\235k\317\273\326.\217\274\363\273\t\275\243\"\215;a\254R;\215\016\220;\306\027\257=\321\375&=J_I\274W\202L\275f\243\017<\270\373,\274\333~{\275A\227\264\274\0020\032<\312@?\274\325\020\232\275d\321s=\262\004\242\274\177\357|\274\310{\274\274\320\2766;\376\211\232\274\221\3607<j\337\244<-\313*\275\317J\025\274\244\030\344\274\211z\013;\023\225\033\274\236\023\274<\027\025\003\273p=\343\274_\026Q<\361h\'\273\271=\244<\210\311\374<\2153.\273\t\370\360\272\316E;\275\220\023\235<.3I\275xk\200\274\256xM\275>\306/\275K\352V\275\010a0=\254\233-\275V\372\017\274n\215\037;Q\325X<\210*3=j\315\246:\377!\027\275y\212\006\275\362\2072\275\340\205-\274\327P\242\271\377\332\253\274\211\233\274\274\335\365\331\274\3244\206\275\235\333\345<\206\300\037\275V\265m=5p\"\272^g\330<\007\017\276<R@\243\274\202\334\022<\242\030\232\273\'\265\t=\216TZ;5+\034\275\2674\240;Q.\n<\202\365L\275A:\005:x\342w<^-X\274\226:\022=B\214\302\273\317\273\242<1{\304\274\013\273\237<zd\202\275]3-\272\244V\330\274\244\215\002=\0340\260\273\025\303\222<\233\365\217=\266\210\001\2731P\021\274\315\333\363\274G\313\007\2759\304\234\273|\"\243\273,\306\322\273\345\240\023=\261\302\033\275v\325\254\272\346\270\';\034\024\300\274\332#I\273\202\254a=\323\363\201:\016LU<O\314\177=\312\177\225\274\323^\003\275\006\261\355<\ns)\275\037\352\014=;y\003\275\357\007M\274\355\235Z\274\210\277!;<B\237\274-\346\202\274\207\205.\275\0276&\273\213\013\234\271\332\335\022\275\247\217\320\273l\275\370<Jr\322<\251L=\274\326\372g\275\337\313\257;\3445\330;\276#\321<]\225Y<\202\245E\272K\240F<\'\373\"\275\226xa<\001Dp<\230\301\363\273\210C\265\274\343L\245<v\371$\273\321\004\230<\335N\343<\034\033\250;\360M\333;\326N\246\273Gm\233<A\250k\275\236d\361\273\333\304\377<\036\362V\275p21\274\232\021\315\273O\013\006=\rk\220\274\230\244A<D\244\371;\023\307\241\273\220\t\213\274\341\225\321<\327\223\370\274\024\304\243\274\264\252M\274b\325f\274\251\035\277<7\323\354\274\306!\254\273\340\221\023=V\203\014\275\037i>\274l\331\026\275\230\267U\274\337\206&;\340\366\314;X\254+=J\337\204:\254y:<k\274\261<\350\3174\272x\362\333\274Q=\202;\302\223C=\252\332\363\274
\250\264\352\274\n\021\266\273\337\010\006<\010C\014=\210\214\035<\315i\027<\'9\262\274J\026+\2753\240\223\2728G\375;\214\352\233<\254G\254<\267\223\362\274\371\020\240\273\316>u<\245\010\253;\377\007\302:\"\007\244<\025\tO=\323i\030\274\352\250\366\274\024Z\204:\346\260e\274EI\375<[\310\024=\2533\354<8\233\210\273\025\203\215<e\030\026\273\213\030\036<\324\324g\274\321\236\304\274\222I\371\274\304\307\244\272\213.-\275\350w\211\2752\216\201;g\376\020\275\315^?=\033\200v\274\'/\336\274\005\254 \2750\376\030=\312\2705\275U\216\377\273\215@:<\020\000\r\275,\352\321\274x\"3\274P\250\340\274\353\014j;\322\217\303\274\253\264\304:\233}\260<\221\230\244\273\244u\302\274\2418\025\275V\003\233\274S\237\276\273\233\035\264;\351\272\274<.l\n\275FP\317;\335\032\203\272\333\247J\274!8b\274\356\030\225\274\326\255Q<4e\010\275\336\275\262\274`\315\334<\324\241\223\273\017\324\267\273\342\335\237\273\243\360\226\271:\rJ\274S\023\225\275H\004W\274\224b]<\023~\314<\256d\227\274\304\324\352:3P\224\274\023R(;5\371\355\274\371\177\000<9\022\251\274ZT\257\274X\251w\274Q%\312<\350*Q=H\276\247\273-\205Z\275\330+\017\275>\200\324<A$#\2743>j<\343s\001\274\007Q\353<f\237p=\022\001\213\274\357\023\366\274\251\276Y<Dq)\275|\023\221\274\252J\236<\212=\300\2732\370\2639\314\027*\274W#/=\275dh\274\313\345\314<\2215.<\224\321\000=B\311\317<GO\256\2749\361(\275\017\241\362\274)\237\226<G\3756<\317\247\240\274\207u\317\274\013jX=\377g\036\273M\322\207=\320\'\031\275\'\257b\274\370\337<<\353\372\023\275d\216\006=znD\273hk\002\274\230\330\356\274-_\216\274c\337P\275C\037\206<fV\0219\t\032[\273\314\326t<\t\370\250<\023\212\215\275}P\203\267m\264\222=G\357\264<\343];\274\024Z\353\274d\303==l\334\014:\000\262\374<\277\355\266\274sT$==\311\035\275\371\330>\273\"\264\034\275\320y\305<\331\255\242;%\265\005\275\365\237\211\274((\"<\317\255\r\274\267\200\325<\224s/=S\244\021;yi\343\274-*\310;BJT\274\211\367\253\273o\'|;\362\367\"\275\203\366&\275J\303\332<\230\350i;\304\2500==\317\234\274\227\362(\273\307\322\236<$\216\204<h\014C\275O\302\3079\0178?=\317y2\275\225e\316\274\247h#\274(n*\275\277u7\274I/\212\274\006\212\313\274\315\347%\273~Zg\274 I\244<\277\024\031\275\3749\340\273\267\030 \275\326\254\255\273\252\0321\273\005\324\n\275\366I\'=\223N\014\274\312\005\023\275\375\304\305;5w\273\274\264t\240<\247\021\022\275\033r*\272Z\317\243\274\031^\261<\3669(\274\r\\\254<\3030b<\204\342\244=\315q\272;\20401\273L\277\377;oD\215\274W\253\375<:\335\325\274\353C\021\2754C\351<\277 #\273\264\367\204\272\036 p\275\311\261\n=\017\017\036\275\251 x;\253L\223\274\213-U\275\001Ag\275\217\016\334<:\201\203<\026\336\010=\307YA\2754d\003=f\271*\272\000\201\030\275\237\010\007=W\351\013=\207\212W\275\0139\325\273Y/[\2748\241\260<\004\001g\274E\260X\275t>A\274\321L\232<7\307\353\274 \267+<66u<W\216\t=\275\324\346<\275\362\203\273+%\366\274 
\276l<[b\306<L\333\010\275\365\315\\<\340\203\333<\315\317\354\274\203\013\313\2739\335\277\274\361\343O<o\314\017\274\373\207\313<*-\244=\360\261\273\274W,\317;\347\\_\274\255p\002=\306[\007\275\212\013b\274T\310\204\274\314\333\312\2748\347\341\274\274/\237<(\243\\\275!\327\027\274\314\214\360;\233\005\206\274\001\r\371\273h9p<\266I\0359\230\336\034\273\222n#\275\014\315(=a\236\241\274\302\243\367<\350g\200<\256\254\214\274\"vc<d\r\261:Wx\300;\246KV\275\214\254u=\326\330\374<0\362\364<\323\243c\273+\323\237<E\016\347\274G\000\212;f\226\360\274\345]\204=\310\337\020\275\343\233\001\275\022t\346\2746\037\305\274C\032\226\274\251\312\301<i\372W<K\277\257<\n\031\333;(\370\3119\350\221@;/U\203<H\036\252<u\255\177\274\\?\236<\3642\350\274J\374\224:3\014\315\273CI\220\274\310\352\376\273\213\371\204\273|R <e\016\261\2745\343\326\273\\\346e<\347\212H=\253\337\313<\220\267\007\275\355O\033\275q>\023\275_\354&:\033<\025;h\261\016=\200\371\304\272\274\374{<<\321\201\274\371M\254\274\201\310\250\2749\272b\270qZX\274\272\363\036\273v\232\345\274\253\270\211\274\315H0<\316:\244\275\220 \206<c\340\351\272\272\003\352<\203R\204=\262\0309\272!V\010=4&\016=\246\270i\273\345\202M\273\241\360\\\275\345\375\231\274\344\317\316;\275>\336\273\024iw\274\361\023!<\303\301H\273\304\272\340<\332m\225<Y,o\274\273\203\223\274\334V\211\274_\nX;\272\263*\274\264P\000;/\274\306;.\316\014<\303jO\275r\276\324;fJJ=\2679*\274u2%\273C\324\213=\223\334?<\212\200\255\274\366\300F<\303 \227\274\034B\031<*\225|\274\243\2752\273\301{J\274l\303\265:K\302\202\275\245\233\001<\014^\314\274\024}9\275w\237\024<\226V*\274\323\304\024<\236V\206<\2320\3609\303\014\323<=\263U=\353\247\201\274\2000\317\274\274l\r\275Yf\367\273\2127\031=\217V\250\273r\314\353;\037\342\204==\267\272\274\332c\334\274\263ZF\274\266/2\273u\034\232:\307p$\274\360\356\224\275s\nd=R}X\275t\250\254<\342\351\t=x[\344<\203\271G=\223\031\201\275\345\263\001>?B\007=\337\\\203=\207U\026\275,\363\261\273\246D\333\274\233\3440\275\372v\273:T@4\275\225b\206<m\tb<\255\2274\274\031\016\033\273\212\345\315\273\253\213\252\274,\273*\273\354\375\220<\037`5\275DE\310\274\036\262@\2749\306\267<\257}\037\275~.\035<\264\266\352<h(\214\2743\340\220\274\341\217\200<\313\333\260<)\330z=>\350\213\274U\224{\274o-\272\2749T\272\274\n!\224\273\370\007\370\274W\234K;?y\n=\275\375\033=v\217;\274z\352X<\366V\251\274QS\023=g\204\204<mc\355\273L\027\014\275\217\025H<K#,\274\313\224\353\274\231\2211\273\341\335\024\274\356\337\\\274\306\213\276<\326A\'=&\370\226\274\333q\216<\220\000\222\274\003\362\335\274\201[\344\273\275\3003=\347hp\274U\260\215:\006\250\240<\307\354\211=\233\324x\274\270;|=\352C\363:\241(C\275\303\337\304\274c_\212<\333\"\206\274Y\251\214\275\033\214\230\274\024\033;=\225\010,\274\256WX:\2325\260\273\230\271\303\274\365\203@\274\021\341\205\274/E\201\274\200[\037\275\"%\201\274H\275\210\274\336I\";\226\373\360:1\242\274;^\262\013=O\261;\275\335e\342\273\254\324\360\274e\203\242;\016\217\235\274\210v\322;\270\261\321<\243%\210<\347\2154<N#\203\273\027\037\272\2742-0=7T\356<\202\334\224\274\365\225\223=\332W\360\274\214\032\036\275\340\273\365<\276\222\n=\332\340\000=\024`Z<\207\031S\274\321a!\273\253\233\321\272r\217\"=\036_\366\273K\275\\=u\202\000\275\273\211a\274\242]5<Bx7;\030\201\005\275tH\342;\247\031\023=u\377\240\275\304X$=\325@e<\235\036\034\2741\010\026\275\276\317 
\275Vo\234\274\243\250(=\364\362>\273\366\3174;\255&+<\267\021;\273R\030\027\274\177\271\177=\214Q\010<@\305?<Not;\205U[<\001\021Z;\'\337\205<\014\302\205;\2527\035\273h\014\006\275;\027\276\2746\337\025\274O\332\334\274\236\352\303\274@K3={U\032\275\267\034\350\274\035]\005=\245g\010\275\310\006\034=U)\233\273/\n\216<%\215\035<\317\272\205;\305\034_;\275d\216\274\352\373\203=\321\005\273\274\221-\200\2751\324\032=d\n\2719\350\2749<v\352\232<\361EI=\253\300N<~\\\033\276\247?\224\273\200\r3<\236A\022=\365\357\223<\334\363Y=\002i\004=\312/\020\274\302.\225=\332,\367;y\355L:\253R\216<\276!\202<\344\206\206;\353C^=B\"\324<\025\177\030\275Gqi<\372\264\001<\r\226\304\272\373\005\334;\003\303?\275\023W\266<\260\201^=\333oP=:\001\246<\271\001\021\274\336\210\004\273\250\006\245\274\360\255\244\274u\276;<\003\276+<\333\n\373\273}Za<\251\341\213\274\215\261\373;\341\252?\274\001\216\314\274&\311\303<\317\035\242<\032\217\272\274\257}\266\274\331\300)\274\307,\260\274+\355\0009\373D\201\274\310\221M\2735\3054;@\346\253<\323D\271<1\306!\273\243\271\330\274\007\016\004<\301\327P<\273\215%=\244\311N\274\211\2511<\367\306\'\275O\213M\274\343^\375\273lH\372;\2543+\273-$\200<\306p\016\275\373[D\275\310\335\256<\032*4=9\357B\274\020\366)\274\233\301\227\273T\347\231<\005\253\210\2746\354\373;#\177\227<\300Cb;/\340\007=\373\032\325<T\366\343:\3268!<\344\014\344\274_~J=B\251\217\274-4\244\273.\031%\275\000@B\274\363Y\244<\340\3169\274n\002\203\274\233\350\273\272\336\324\277<\205 \275;\022$O< \r>\274j\261\032\274 L)\273\006c\030\275.{\344\273\030ge\274\373\034E\274\243+\256;\210\361\226\274\031\0042\275\342\371O\274!\301\037<\210\345\257<\305\254\357<\367T\210\274P\000\256\274\372\rl<\224,$\274\217\361\'\274\213\200\311\272\311\373\004<n!\244\274\300\236\023=\016\302\240\274/\321\025<\240\3456\274\310\277\210\274\205\305\336<\313\314v<(\322\355\270NW\241\274\254Q\'\2758N\236<w\325J\272\016\304^<\232\035\001;\352\204\215=\307xM\274\255z\211\274gG\204\274\227;\233;Q\004\223\2736\370\327\274\302\3169\273l\013\r\274\t\377L\275\252\366\203\274g0\345\274~\246\337\2742]\014=\366\311\364\274\326\220\267;\037u\240<\350\341\"=\315R\036\274)\006b\275T+\260\274\326\336\001\273\214\337.\274X\3659=\014\360\267\273p\363\007<\317\021\350</\372\346<\310\363\325<O\252\225\273b\002\317<\213\037\210=\222\207b\274\314$\006<\350h\017\274\321\317\275\274Z^\230\274\254\331\010<\261n/\274\366\203\241<\276\223};\264\021(\275wV:\273S`d<_\354\204<\2311w8+M\377\272\373\210\315\273\002\214-\274\320\032_<iM\251<\270T}=\266\325\302<l+P<\263_\026<\3446\013\274\343\230o<<\252\303\274\343\340\334<\031g\025\275\257\273\031=+|\300<\331\2626\274r\261\300\274\022\226\013\273\343\300\213\273<\366K\275\213F\257;\300\n\214<\244\231\n\275\312\353=<\307:N;W\371\032<\362,\350\273\013w\201\273p\254\235<W.;\274\364s\330<\306t\304<\220tn\2740\331\262\273rj\324<8\307\313<\037Q\030\275\307\347\002<\035-)\275ev#=\036c\300<;\363\251<p\216\026=\303\013\033;\342\3347;\205\005J<\341\255\006\275$\267K\274\003\026\217\274>o\205\274\367?T\274\246\022*\273T\013\224<\2470\254\273\301\300\202\274\366\313\252;\370eA\275}*\017=\256\330\305\274\035\245\333\274\033\031\362<\221\027\310\274\371\021\001\275\231aA:\307@\205<\210Yw=\252r\355\274\345\227\004<;9\026<\245_\220=~\023\013=\255S\007=Q\212,=\253\244\203\272\3177\024\274`\335\210\272\367\261\010=\337\\$=!\303\320\273SU\032\275\254^\302;\243\262\222\273U`\351\274^\306R:\253\360\256\274J\335\254\274\347X!\2753\007\263<\362\261\277;y\350\026\275\200jZ<\246\361\302\274\233c\327<\200A\233;OG\021\274\310\316S<\251\002\334:K\
n\202<E\n\307\271\002dY\274\245\031d=2\313\013=e\370\222\274\223\354\255\273\314\306\025=\343s\031\274N\327(\274\333\033\246\273&+\317;K\302T\274\332_\035\274\303a9\273\377~\020=\311\266\223\2743j\014<bq\346<\356\336X;\327n\271\274\372\332\243\274\326!&\273\023\244\224;\310O\"=m\016Q\275C\004e\273\344I}<\232\370\236\274JY\277;.\345d\273\001\251\364;\340\014\325<\306\n\n\2731v\312\274!\325\303<\224\343\267\273\272\026\245<\207\265\243<iYM\275Y?\203<\210\022S\275\222\013.=\024\010\330<A\327O<M\302\223;\274\224@\274\303\330Q<\347C5=\375\226\210\272 *\274\273Cb\311<\340\335p<\335\3324\275\326\350\312:\343\223\342\273\274\023\245\272\274\204\364\273\021\324\241<\036\317\n;\340\024\n\274?\362\372<\000\342\022:\323\007&\275\327\274\302\273P\2663<\002\325o</\344\235\274F\271\260\274i>z\274\343\342\345;\022\333q<\013\342\245\274\242\0227;\306A\024\274\010W(\274d\360e\274\005j\"\275\001\231\255\274\037\247\311\273uK\264\274\321\214\352\273C\220\220\274DD\004\274>\243\254;\301\250\032\273:\225.\274\2137\212<\301\332\346\273 \205\003=3\260;\274\007\326\357<@\231\233<\372$\315:?\272\300;\310\"\315<\331\003\006\2741\276i=\317v\243<+.H\275F`w\275j\325g\274xC@<\246\030\255\273t\000y\273c\254;=\314wF=\025\r.\274\377$\351\274\037\360\270\274\216B{\273\266\335C\275\347\010\264\273\325\325&\274\330\2203\275W\251=\274\002\273\\\274K\356/\274f\013\372\274\2269,;\363\362\242=\326\225\356\273Yz\017=\350\357\010\274\377\035\224\275p\254K<\232\014\230\274\253 y\273iL\230\275U\305\363\274o\006T\275:2`\275^[\020\275f*3\275\200,r<KL\t\272\216\247\031<\034lh\274P\227\213\2700_?<\306`\374\274i\324\276<\003\023\013<\353\260\256=\231\231F:\203\330T<\305\000\013=\365\353{\273?\227\007<\275\263a<d\246s\273\206!\330\274\245+\222\274\363\006\353\274\325\037\217\274R\204F\274\007\207\376<\203\273\231\274fi)=\374T1\274\271q\351\274\014\342\r\274\035\301\013\274\204\3756=`\223\263\274\354\325\'\274WU\214<\350\237\022\274\r\035\301\273G\374\323\274\032\323\204=\206\217\377\274.Y\346;\350\005\216\273F\200\243\273\371|D\274I\375\240\275\004;\211\273{\2350\274k\202\020\275\033\310\030\272\316\005\001\275\'\251\322\274\272Uh<\211\234\203\2754\213\026=\237mU<\362C\223<\363 \341;k*\262\274\001{#\275\233\334\233\273\245\020g<\010\003`\274\347}1=\234b-\275\256.\215\273bhG=\032d\314\273!J\002=\023\260\231;\226\0072<h\227\271\272^)Y\275\320\362e\274\250\312\202\275\247\343\032=.\357\010\275\n.*=+Ng\274]\26029AQ#=2_\211\273~0\370\274\353\364\230\274)\031M=\254\377\347<\317\035\233\274\367:\016\274\017\347\3169\202\314i\274\005<\203;N\316\206\274\364&\203\272\0057\300\274W\265\002\273\324\2153\274\035\303\272\274R\353\211\274\342O\240:L\356\001\274\272}D<\216\216_;Uf\2278\337$W\274\002\031\r\271_\n\252\274\234 \334\273\316\212\217\273U\007\004\275\246\020*<\241\3747\274\314\301\233\273{a\273\273<|\337;\0103\275=:}*\273\351Mx\273q\260\331\274\266Pb;}\"\013\274\235\021\240:\247\341\243<\036\342\225\274\215`\360\274\223\312 
\275\251\026\007\275\271O\303;\034\322\036\274\325\332:=\247\312E<\202\034\327<\303o\272;\212\005\223<Z\323\005<\245\267\324;\037\341\243<\024i\320\274\202\373\244;\204\002B=\024\332T<v\366\260\274\334\235w\275u\331}\274F\204K<\253\207,\274\r\360\320;.\314\240<\033\004\274\2740H_\275<9\027\273\207\261\326:;\347\364\273S\242\314\2740\274\203<\264\003\365\274\210s\242\273]\212\r<g\262\031\274\337w\032=I\357\005=\3714\321\273\346\303\204\275\370\236\313\273\301\236\235<B\035\321<\204>Y<\346}\276\274?\030\334;*\245\304;\347\240\325<\340\235V:\211*\036\274\344\302\003<\257\0354\274\232\014\375\273s\345\314<\215\362\n\273|\025t<\217,\017\275\305\205\276<\326T\214;\217a\313<<\271\213\274\351\263v\273\230A\304;E\275\350;F\352`\273\202\207J\274]{\213\274`\341\267=j\213l;H`\207<\2414z\273\231H\017\273,\277i<1`\177<K\333%\274X\362\256\274\246\373\007;)b\016\275\241\225R<y\210\240<\027\361\017;\"\210\010\275\300^\254<|\342\t<\252\314\311\274@\205\273:\177h\002\2757\351<<\321k\353;\302\223\337\274/\365\360\274\235\250]\274i\261\013=\275\024\003\274\271\264><\356\027W=A\365\377<)\244\366\274\371\006\341;(F\370\273\273q\375\272\246}\022=\254`I\275\034x\211\273<\345\010\275\354r\324\272i\322\213\274\212\tw\273?\334\240<\366\036\212\274|Q\226\272\022\226\013=\276\0175\274<e\370:\233\266!=\327\004#=\276Y\250<\333\313\313\2746;\277;\025\365w:\027\263\241\274\351\231\311:Oe\277;\205\314\223\274\361\366\355<(=\214\274fTJ\273\'K\301\274@Q\227\274\353\\x\273\270\340\201<u\rf:\021\307\206<\332\320\200\274^H\370\274\362\313\227\2742|)=-\347C\275\214\263?\274\010\311\217\273*\'\264\274\264\237\313< ~\034<\023\2267<\321\235X=\000\002\307\274QPc=6^\022=\tQ\212\274ZRu<bOn\275\234\336\212\274\245L\335<6iU\274\341D5<[k\305\271\327@x\273g\340\301\273\037`\311\273\276\001,\275\210\303\032;m\261e\273(\204\250<\315\330\202\275c*\333<>\035_\275-\224<=\352\222\351<\316g\235:\235Y\021\275d\240\016<\213Y\312\272\365\257\254<W\307\n\275\361\353\272\274\316D\210\275k\265\006<\231?h\273\277\324b<\206\t(\274<\365\361\274\363<\340\2746V\232\274G\026\234:\357\304\001\275\030g\277\274\261\337\220\2728\335\213\274n\254\301<\3136\370:f\355W\274H\034\365\274Mm\261\274\357~-=r4\214\273\246^,\275\231\003\003\274\342\244;<h\350T\274\362\357N\275\025\222\274;=mw=w\203\030\275\235C\235\273\334\2623\275\2109\232<~\207r\274\026y\006=\357wX\275?\344\014\274\241e\347<\375\305\237<\371\256N<Y\026d\2741@\231<Q\224\206\273@\2163\274\261\344\026\275\201I\037<\305\304\206<g\203N\275\351\026\362;Nu\037<\335\335_\275>\336\371\273\004\321]\275\333\304Q\274\234b(\275\352\226;=\007\230\243\274\302G\270<.\333\246\275i\030\233:Qt\305\273\201w\215<o\264\354;B\377J<\306p\200=Gb\360;=\351\300\274>^g<\237\227\024\274Yl\355\2740\221\027=y\014\355;n}\262\274\244g\026=O\024\357\274(D\274\274=\257\237<#\017\317\273\n\033.\273\221\200\223<\347\3051\274\036\244\007\274\021|\305<(k\253\274\273\3243=\330y\000=\273\301\001\274\221\027\314\274\200z\241\2737\003\236<e$?<\013\266d\274\"\224a\275\304\202\030\275\314\034\205;\t\257u\274@\324E<\\\252\000=w\302\270<\177\261*\275\t\232\325\273/\375\234<\206\332}<\221x`\274\\s:\272\261=\023\275p\030\300<l\344\305\274\252r\224\274\001B%=|\253\310<\230@l<\303\252\321:\023\225W<\375\013\035\275\016\361t\274\2565\304\274\327\360\217\274O\216\006\273y\262\016\274?a\t\275?\365\022<\273U\231=i\250\351\273\330\376\301:\234\301<\275\360* \273T,\242\274\265\340\203;/b\2139\317\356\226\272\211\357 
<Ik\356\274r#y<\014\236T<t\307U\274\355\251\237\273\241\375\227<+\344_\274\r\032`\274k\272\327\274QI}<\303\377\371\273p\250\265;\r\251\377<Y\362\321<\244\363\371;\250h.\275\260\252\030<\206=\\\274H\245D\275y\265\314;\236q\320\274\315\2050\273wA\204\274\343\277f\274UQ\203\274N\261O<\262$\372\274\255\000\t;p/\200\274\223@8=\210G><\3154\013\274w\330\343\274\232v\026\275\364\240Z=\r\205\365;\264\355\345\274\367\312\240\273w\034S\274?x!\274L\031\016\275F\033B<\206\034b<\254\305E=\0351T\273HV\365\274\013\271\000=\374\016+<\200N\322\274?bW<\203\204E<\210\265\034\275\324\0043<\001\345\207\275\373\007\200<\214\364\366<(\364?<9\2012\272A6n<\234\374\327\274\334\234\023\274\023\317\205:\213\311\210\274\300\313\366\273(\021\317<\033\013\341<~\000\263;]\250\363<\344%V\273w\341;\274[N*\274\321\277\223==\0235<\216\343h;\030\310\312<HE \275>I\210\274;^[;\0312]\273\372 \r<\306\314\232;\370=\317\272*y6\273\256[\242\274P\301\022\274\347\353\010\274\372\353\202\274v;\345<[\342z=\211\260\256\275@\305\353\274kKU\273\242\202\265;J\330\302;\377F\255<\320\2634=C\221W\274d\263\204\272M%B<\272\314/<c\345J\274ZXV<\234s\205:m\0300=\202F\240\2747\341\026<\237o&<\263:\306<\260\270\340\274\024)\346\274\017\332\341<\302\027X<g\311\356\274\274\244\020;\364\245;\272L\306\025<[;Z<iUi\274\340\001\353\274\3418E\275q\307\216=!\254\237\2743\315]\274P\327><j.\005\275%b!\272F\277\014=\036\300\275\2727\267\037<\234\004a9ux\343\274\342.\023=K\234\301\274\206]\r\275\007\232\216\273\330\316\\\2749\nY\275#!#<\267\231c=\031*\205<:\345\200<;\215F<\225\021\241\274\333KY<D\002\344\273\"\0376=\301\362\360\274E\214\274<V\212m=\001\\h\275\247\3315<\214\271\200<V6f;\334rP\274_\361\257\274\356\246\377\273)\002S;\207,\220\272\026\334\311\273\336\353\001\275\233\2064\273\340d\340<\216\252G=\356\317\022\274~\213;<\344\203\354\273\360P9\274#\337&\273n\326\263\274\250\2535\274I\033\321\274\014\226\010=\010\232\240\273\344\211\316\273\006G\235=\023Z\024\274\365\334\272\274\216\226I<\222\215\000\274\374\350&<\024\271\002:\022sU=\261&d<\333\311\232<\366\005Z\274G\t\257\274\353\rL<D\002\312<\313g\273<\265\306\010<M\376\n\275\023\006{\274E\331\251\274\006\205\033=\300\2009\275u\271\335<,\227\032\275\256q\256<\322\321\323<\201\343\204\273\266\260\013\273\tQ\264;\251m\236\274\001vy=\276\224\252<*/\235<.[w==\375c\274\357~\300\273d\347\372\273\217\365\247\275\354\346I<\2457\'=\307\253\365\272G[:\274\217\330n=7\305u<&m^\272\237X\004<\236\203\247\274\025\374\207;I\254]\273\340\270\021<i\365\311\274GTT\274\\\'S\275!\321 
\275X\205\343:c\3251=\212\300\270;\376\201J\273\204\364^\275\r\2122\274`\203e;\375n\335\274n\246\004\275\371b\023\274\312\n\356\274I\016\345=E}\2309\3636W\274i;\322\273\036\364D\272\205\233\341\274\352\276*=\371I*\275/\320}\273\340_\273\274\3208*=\2665\214\273\001\375\277\274)\325\034=\277w\212<\356\3207<\2625\020==\313Y;c\037{<\'\324L=\260*\314\274\366\223\'<\024$m\274v\367\324<\023N\251\274S}\216<\346\372\355;y\246\003\274J\3557<\034i\n<\254\3543\274\241h\343:\300\2552=\215G\002\274\324\006\245\274\207j\271;\374\351\243=\006fo\274@\201G\2725\246E<:\360\002\274XJ\021\275\002.\215\274`\324\233\271\260\t\216<v*?=S\202W\2745\216\264=\321y\331<S\250\006=*2\203=\320\233\207\273}:\025=\262\355\257\274\003\251\\<\211*V=#\0210<\345r\370\273K\372\2529\033\346h<\252\320\216<\033\"\371\274\025bE\274\025\233(\275Ok\204\274\026\013\355<\263\177\300\274\342\332\365\271\177\377\321\274\357\017\331\272\327x\312<\354U\020\275~\241\203=\227*4\274AX/\275\362\225\265\273\375\343\343\274\273\372\250\274k\030\035\275\371\376\224<r\023\366;\027\211\317<\367\304\t\275KJ,<^\212R\273\014s3\274n\007\341< \231\334\274\253\343\032\273\337\t\026=\004\356*<c\322\023\274\272\335+\275b\005l:}\020\032<\361\357?\2737\274\310<H\226\325<>\017#\275(9\237;T\265\177;0\240\343\274\330\326~\274de\221\274\3761\327:\003\201\315<\375sx\273{\317\223<S\217\036\275\336<\244<\201\266\240;\300A\2768\272\344\201<\\-H\274J\235\207\274\304\221\355\274j\201\325\2743\006>\272\301\374\326<\355o\031\274\360\2163;\267d\201\274\255\242 \274\355\352#<\3445e;\304m@\274a\327\207\273>m\364\273\302r\212\2750\344\n\275A~\201<2\365\225<\'\177\315\273< \353;\r7\346\274\360\243$=\332p\202\274\010_\201<vB\267<\270\266\271<d\211\270\2747H\344\274\302\342\360;k\277\202<\263\226Q<c{Y\273)\037\024\275A\034\313<r\231(\273b\331\016\275.nc\274\303\203\336\272\340i\376\273\347\212\207\2735\210\345\274\345\004\003\2741\374\020=\002l><\263l\246\274\266\024\355\273y\344s\275\271\230_\274iF\005<dU\277\273\031\237h=\221\204\204\274\372\301\003=\351\355\322\273|\236\307\273\244\264\014=]\223\377\273\326\367c\2752\256%\272\221Y\240<G\200j:\233\002\035\275\305bN\272\304Y4=\000P\254\274\362\330\227\275\263t\274\274\360/\313;\'\005\025\275\030BQ:\215\347\351\273|-\300\272Dq\317\274$\231\021\275\360\025\333\274\345|\203<9\270\327\274\014\257\352\273\2615h<(\361t<\236B\213\274f\232\266\273\001gJ<\376\260+\274|\216\330<) \004\274\362\243\022\275c\300\300<\241\rY\274\247\202\312<\373\301\241:\210b\341\273\r^\264<\225\352\352;\357\217.\274\347\350\223\274\274a#\273\302\275\036=\005\321\266<\024T\220;\265\262\255=\211\010\232<\022 \026\274:\314\362\274\256\222\355\274\215{\"\274*\372v=\036\3262\274\335\225\210<\354\340\010\275\334\023\220\275\362A`;\347\322\234<\026\354d<\367=\035\275\026>\006\274\203\207\0077\2348\301=]\310#=\236\217#\2745\3054\274\257\267\030<\376\220)<\r\222\003\275\344\212v<4\231\240\274\242|*\275\006\220\002:m\304\312<\353\001==*\274W=\354P\303\271@\253\235<&\356\235\275\323\224\320\272\200N\036<\024\301;\274\241\277+\275\253\031\356:\326N\222\275=\251T\274\316\234\023\275\247\002\300\275\340\242\304\273\313\304\326<\010\221 <\253\345\t\275\217\3221\274\005\"0\275%\007\035=`\303\250\274\222g\227<\005r*\274\335\311\247\273\332\204\245\273\215t?\274\177:\234=`\362\311\273\025\3267\274\230i]\2740\356\231<D 
\277=\0021\372\272\033\317\177<l\260U\274\214\205U<\302c\207;\214~\237\275\032X\247\274\006\236\216<\253H\220=\021\317K<\\@\212:\243\373\224;|\250\t<\200M\020<\263\312W<a\313\007\274\360^\":\320\020?\275\222l><[\367^\274)\3236=\037h\004\275\336\027\330<\016\304\023=\325=\230\271\345?\314\274\231\265\301\274d\004\203\274\353\324\000=:\225\200<Cmx\274\357\242\027=\227\301\021\275\226\316\000<\317\206R<\007\270\354\274{\302N;yE\234=\346O\225\274\265\220\340;\325\203\330<\242\224k:|%\236\274\221\236]=\317\372\3239\366\373\236<-m\332\273\240\305\232\271\005c\364\274\222{\216<h\240\002\275\262\233\031=\005\334r\275\206oW\273%\t\245<\037\271\201\275\3700\017<V\257#\2737\202\221<\261XO<\303\260}\275\205\273\276;R\321\256<5\233\t<z\331\365\273\013\345.\274 \312K<k\250:\2747\205;;\302\263\003=r\325\311\274\266z\271\274_\332p<,\352\271<\227\017g;\301\264!=\030\363\320\274\355\205\340;E\022\002\275B\034\321;E\035%\275\201Q\276<\260^\347<w\005\352<mF\007<od\252\274\270(\227<][D=\r\307\371<\263\025\207:I\353m\274\272\210\014\275+\310E\274\000\345\351\273\204(\200;\246b\274<\354\211\026\274\360\220\005\274\203<=< ]\220\272\242G\311\272\265\014\270\2731?\265;\2471V\275\302\245\333\274\035S\214\274N\373\306\2731K\035=\337\222\010=W\363\262\274L\007\017:B\366Z\272\277 \276\273\302=\006=O\213\027=\264\251\254\274\026(m\274\031y\302;x\233\273<!A$=\246_\267:\331\272\316<G\245\274\273\223\271_\274\020\325\027\275\355\035\036<\277T;<\201\007\205:o\001~\274\303\302\236;\345aw<\204\'\201<\321\375\003\272\351\263t\274\301FQ<\"\313`\274,D\361\272\333\211E;\327\301.\274\224\276 =pn\004\2749\316\211<?\202\357\273\273\020i\272-\274\n\275n\342\350\274\361{=\272\366\214d<\253\r\342\274\226\204b\275\253\330$\275qn\317\274R;\346<\274/\371<_\332\331<=-\000=6i\275\274m\372D<E\234\005\274\240\263\303\274\305\025O\274\007\255k\274}\332\230\274j\264\353\274\021\000\253\273\t\361G\273\022\031\3149\034\234Z\274G\253X\275-\246\263\273\335\212\014\274\205\335\002=\003\260\331\274\212.\215\273\013\030\315<YS2\274o\276\254<\277x\267<\227\2104<Z\367\235<\274\242\021\275\265\013\261\274.y\354\274\003\201\n=\215\000\367;}\353#<\363\315\201<\037y\365\274P\t\364<\354\024\376\272\363\374\224\274\306\036\243\274};\213\275h\275\276\274\243\333+<N&\n<G\337\033\274.\340\352\274d\371\003<\007\373\240\272!_\311<\276(\324\274i\323\263\274\272\010\207\273\214e+=\305 
\336<\235\270\266<<N\266\274\316b\223\274\241}\240\274\233\202\242</\003m<\0038*<p\265\031\275\337\006#=\225\333\304\274\273\263\332:\010\312\260\274\305]\215<\207\010\213\274\302t\300\274\310\033\026=\032i\022\275{{\212\274K\347\177\274\031\210\357<e\267\214;\203\354\034<]\303/<\0029\006=C,\215;\351\204i\274\332y\036\275\002\216\206\273\334\311\207\274Xo\320\274\257h\032=\020\371\202\274d\270a<h\336\261\274\026c\003=\201\200\254;\352\035\307<\017\347\006\274\335\266\242\274X\023\014=u;.<Q\306\220\274\000\204\206\274\270\310\320\274\237X\211\274v\234\245<18\327;\030\347\277\274\274\317\016;\273\205^=\024|;\275f\005,;X2|<.\351\372:\222K\260\274tl\014\275`y\037=K\373q;?\304@\274\327\205\243<\253\316\r=\032i\351\274\354\212\231<p\270l\275\332Q\267\273\317i\223\274\216`\354\274\270Q.\275\250\026c\274(e\247<V\356\305;~\220\204<\230wq<Q\004\274\273\336\"\336<\214V\311\274\322\303\236\274\263F\252\274Q\207\313\274\300\324\231\274\275\361\320:\227\305w9\247\330\272<Q&8<\027\016\235\274W\236\330<oD\251;\005\221C\275\006\212\244\272\213\270*<\032]\300\275I\207\322\274\246\026\021\274\336\227\225\274c\357\265\274\004\236\375\2731\262\023=a\033\014\274\2559;=\325\354\r=\022\323\356\2747\243\256=9\202)\275x\212\325<3Fl\274t\3704\273m\346\n=\255x\027\273\312\3209=\370:S=Ae\341<\252|\373;6[\211\274\266R\216\275\0245]<!I\210\274S\240\"<i\340\003=\361(\003=\206\263\247;\r*.<\375\245\005\273\002QO:;T\343\274W%h=Pf\273\274\210.N\275P\266\215\274\231\356\037;\241K8\274\274\227Y\275\2403\227<\344\217\244\274\3657\244\274\316wS<%aO\274\372\305\"\275)\311v==\2660<,A\332<\305;\256\274\3605Z=\002\244P<Bo~\275\346\357~=]R\235<8xe\275\255\203W\274\0060-\274)g\273\274\001\221\237\274u\007\302\274k\320\330\274\200\221g<:\250\000<\333\267\306;46\306\273\273\360x9\265m%=\217\352\271\273\336\036\276;\377\261|<\306\227\245<\034@\277<6\200B=<\254Q\2749\340\267\274V\301&\274h\315\003\275Y\342\222<\277\374k\2730\375\227\274v\321F=4\035\240\274\351\030g;\2212\202;\023\212\342;\032\226\r=\352\265\247<ia\025<\377P\237\274\241$\363\273\324\240\002=\211\006o\275U\251\\<5/R;\325\252\202\275\026\365\014\274\356\004\300:\275\331\017\2724\2321=\373\006\277;,\230\253<\352j\257:\323oQ=\203\324\001<\004\372E;\211\371\340\274Q]\336\274n\037\345=\363\322\257\272\350\223\343\274\246\005&<\322U\005=\252\356q\274\255\320\363<\310\224h:\247\337\020=\264\010)\275\356@\267;\244\210\350\274\344\260\255\274\222\265\367\273\276\233\261\274.\323\201\274\230,\350\274\003\273j\274|\317/<\306Q\217:J\232\005<\355\215S\274\tt\350\274[\345F=\277\266\030=\252\301\037\274\354\3507\2746\255W\275\221\'\276<\021\241\204:<\304\014\275Z|\252\273\345\010\233\274\026\006\315\274N\021:\275\231\036\304\274hX\231\273\25749\274\371\357d\274\276S\3679\242\245%\275\002/\020;\251\217\010=\310\245Q<\267F\300<\370\345\264\274ZVe\274rA\203\274 
\303\206<\264\277\351\272QJ\214\274\334V\003=\224\354O\274t\277\306\274\314u\013<va\245\273\207\366\344\274\306\360T=\343w\205;1\244H\273\301\026\224=`M%\273\">\252\274\005\267U\275\326Z\210<\177\366\374\273\275\370%=\352\300+=\305\304\210\275X\240:\274\263\017K=\335\262\361;\017l\324\274%u\010=@\201\213=\306\262v\274\264\327[\274P\021A\275\227\357\340;\356\221/<\307\257\252<\222\223\255<\030h\025\275\237\201r=}\366\\\274\017\013\006\273\225\030\375<\004\202\253<\235\\\323;\217C:\273\225\337n\2758\0206=\376\265\214<\377\336\333\274\225\326p=\223eg<\t\002\317\274e\316_\275\374E3\275&B\343\274-n\237;\\\010\257:9mL\275\0013!\275\345\254\307\274\377\201\"\274P\365V;,b\227\274\246\227\002=\324e\013\275\035\260\303\274\236\306\214\274\014.+;Z\2032=\361d]\273\277\371\355\274u&\t=%\215\243<$\320f\275\241z\377<\273\203\247<Sx\200\275\274\243\177\274\377\372\202<u\001p=m\212Z=\213?\207\273;R\254\2748PW\275\003\301\341:0*\347\274\0174\212<=\333\346<\\_\332\274\020b\367\274v\"s=\241\262\222\274Jd\036\274\243\007\003<)F\031=\263\307\214\273-\273\370<\354\353;\274\036+\324<\215\277\241;\227\272\261</\003<\274\275\262\233\274 \000\r\275\0356t\274\335?\276\274\272K\327\273\250\261\231\274\314\224d\274;\332P<\000A\266\274M\375\203<\202~s\271\344\266\035\275O\353p=\2601\242\274\365A\216\275\021\271}\274a\034\251<\376K\271\274\023jw\275=\004\207\274\320\366j<\316\220\367\273\035\353\257\273\237\365\016\275\316\331S<\2522!\274t\272\177\274\245\247\355<\342\211\263\274\024\270\230\274)\317\256\273\375]\253<f\017\004=\\H\355<\031Q\246<\245\253\325<\332\363\303\274g\300\201;\272\335(\275n\205U\274w\010/<NH\236<\323\034\014<\306\210\202=\356%\021\274\261\346>;\307\366f<\254\343\356\274\324\231\242;\356\356[<\022\2624<\nC.=\330\237\021\275(\026\264\271^\004\002<\206\264S\274\000\310\304<d\3250\274\2551\210\275\317\357\330<=\017f=[\264y<\265E-\275T\233p\275,!3\275\237\002\266\274To^<q\r5\274\246\244\362\274\006\024\357\271s\006I=\311\343\340\273\334]h\275\3250\214\274\024\213\244\274(\372`\274Wz,\275\201\373\304\274\276\020w\273L\232\274<\034\r\267\274\310\034\204\273\313M\360\273x\306\243\274\226\236\353<\226\375\353\274\340\365h:\355\200\302;o\350K=E\010\326<N\307\275<I\203j=\2516\275<yM\014\275\021:\320\274\036\026\033<\031\2409\275X\304@\275\003DQ;\370DA\275\014s@\274\364\352\027\274\3221\230<\200\327M\275Gi\356;\327 L\274\205\317]:\202\226=\275m(\031\274fq\371<\315\333\013<X\206\007\276\301\rl<u4S\274\362\232\301<\273\3526=e\277\323\273\341>\311\274 
J\014=V]u\275\027\033L\272K\364:\275S\215h\274G\277\215=\237\035\205\274\031\021\022>GLW<\230\3513\275E$\313:\217\350\202\274C\r\016;*\302D;\317\023\006=VPX\274F^R\275\n\327N=\350L\025\275\320\355\000\275\266G]\274\244\256\027\275\007\256\221\274e\004\005=7A\256\273U\177\260\274\204\302\213<H\235\246;\210\336-<\242\005\251\274\032\314\014=K+\t=\225\031>=\350\273z\274tj\344<\322\022\n\274\246\014u\274D\245\350\271\023\t\003=\013\031\243\274\014\335\354;\336\231\257;\361DW\275w\275\030=\277d\\;+}q\275aO;<\026ez=JF\222=\223\367\266\274\363\211*\275N\312\333<\341\354\010\275\206\2371=r)\356<\002\364\217<Xg\202\274\345\222\346\274-\251\010<\373y\216<\326`\006=\374\021\351<\367%\313<@\\\003<\022\241\027\2736C\203\275\020\241\034\274\023\023\315\274\263\0275;\303a\\<\\1\315\274\213\027Y=\361\217\215\275mv\031\275e\376\254\272\341\205\016=\020h\310<\326Z\334<P\340d<\315\'\026\275\0244&=T\023\332<{(\006=\210:\372\274O\276\353<ZM\240\2743W\372\273\347\310\323\274\002a\010\274\3368\340\27403\236\274=\023#\273<\337\334\274\202/\276<\346\363\344\274\306_w\272R\351Y\274j\355\373;\315!\t=\255\330\021<\342\342\204;\265\027\226\274\021\223X\274| A\274R]p\275\020\033 <\224\007\007=\234\016\241<V\215G\275\223\353\320:\022\260\231\273\333\0252<e\356\036\275q%6\275\026\252\030\275\241V8<\275\223\251<\341\217\034=\332(\013;P\237\264\274\311j\336<!\357\320<\025m\216\274\351\301\014\272\310\3264\274l\372\335\274\267\311\013=L\234\262=j\350\362\272.\270U\274\366\367c\275\033\223s;W#4\2749\n\263\274\347\322\323\274\256\230\021\274u\034\334\273Ul\227\274\317\314\277\274\245N>=p\201\233\274\241\374w;+\343\365\274R+\r<\300\327H;\357\227,\274:\034\222<D\246\340\2743\3279\274\367\027;\273Ot\215\275k\231_<\346\345\225=k2J:\252i\231\273\336\254.=\002\355z\273\277A\027\275|o\232\274\210\215\355\274u\266\346\274\004v\262\274/O\344\274\n\260\323<-\365\031\275\312\\G=\230\016\017\275hh\340\2744Xh<\372\034\200<j\014\230\274\363A\241\274\031\274\232<\217\027\225\275\016\275\2079\337\313\002\274\303\364n\274Ov\211\274_\003n=\250\021\322\274\321\rs<\240\242\320\274\276\261\037\273\341Vz\274\206\350\346\274X\226\"<\007\013\367\273\347\300\302<\251E\n<pa\202\274\005\ta\274(\264\024\274\305\351\276;H\276\270<\275\250\206\274\314\263\2728D\364\r=e 
\273\274j\007*\274\223t\312\274\305\217\245\274l\340$=\360C\024\275>\3726\274\337\374D:\346Q\253<)\213\270;\t\262h\273\265\030\002\274Z\362n\273\352\307F<m\367\343\273c\\\363\274\332\213\213\2744\030D\275\274\361O:\376\024\014\274h\274\231=\343\333\254\274\241?X\274\346\362p=\316b\213<k\367A\272Vc\202;ok\240<\361B\235=\337\003\343\273?\267\335\274\322\300\357<\231y\214;\251\255);\353h\216\274\370,\263\274K\037\347;\226\333\024=\010\211\376\274\003M:\2725x\326\274j\217Y<\001\375\272\274=\345\017=\rj#\274\272\037?\275\211\351\373;r\344\266\274\371\037\373<\300\014\003=\356\236\236\274)v\t<N\002I\274\025C\210\273\256{\306<3\273\360\273NGr=/\201y\274\215%\307<\210\276\204<\223\250\302\275\266\025.\274S*\217\274d\2260\275\375\236\262<3)\212\274\022FJ\275\377\233\231\274Uf\200\275\r\226\261<H\220\320\274\\\224*\275\2462,\275\315\235\000\273\203\036b<\346?\022\274\311\307\210\274\257\257\210\274w\377\240\272\2235\353\274\375k\336\273^9>\275\322n\263<!\333_;\265\235Q\273\034\217\367;\257Y\367<NG7=\260\376\322\274b*\365\273\317\341\207<\352M[<\3676\326<J\377F\274\325k\253\273{/\243<\326{\340<\233\310_\274\334\206/:\2637\373\274\201\347O\273\2230\336\274\332\225,\274\034\242\266;Y\"\272<*\312\327<\177\305\272;\276\252\030\274\220\035T\273\341\010\376<Br\210\274\323\0318\275\306\025\312\274\325\313\344\274\264#\302\274r=\275\274\006H\367;v(X\274\"\213\362\273\263\211\221\274\342\375\277<\034,\003\275\177\\\033<\227\271\220<O\342\272\274\362\3644\275<\321\225<m\261\335\274\361\374\032=\300\355\372\273\264)\331\273\316\004\246<1\035y<\364EA\2732\303\235:\035\004\336\274\311V\375:\202ki<\033\026\246\274\010s\241\2742K\265\274\332R\037\275\271F_<\256\365*;&\271\343<&\217\014=\225\030\377\274\356C\251\273\226\222\002=.\270\212=\257{D\275Awq\274\200\274f;\224\030J\274w2\247;\2146!=\347\001z\275UEt\274\361r\001\275\022\373\366<^\313\204<0\333\203\274\241Q\023\273\203%+\274k\242\031\274(\0364\2757\003\014=\266 \216;!&\240<\220yL\274\021J\252<\303\1774<\344`\037<\203\264(\275\204\022$=\323\254\203\274PV\023=Q/C<\007\361\220<\250\2022=\3156!\275\335\372\347\2743o\326\274\342Tl=!%\275\274\353\2427\274\267\306\214;g\237\030\275\0313(<C\230\030<\017\036\r\275(R\325<\311Jw;\261\033\373\274\333zJ\275\233L\224\274\032ZV\275+!\263\274\332$$\274\241\257\310:\205\200\207;At\301\273\347\266\365\274:\230\010=\254\354\270\273h\245\301<\233pc<\325\303\"\272\226y]\273\224J\005=\334C9<\317\t5\275\n G<\272\263\201\274\331J\200\275LyT\274\036\217\002\275\332\364\376\273[\300\302\274\273Lu\272\272KC<\013v@\275K\230\256\274\020jF=$\321X=\272u\300\273\350%\n=f\3718=\317\021\002\273#&\201\275X\307G\274\366K=<\347\2428\274~\221\013=\372E\344:\317\326\240\275\224\006,<\0268w=\2410[=\007\361%=\325\231F\2745X\212\274\377$\010\275\033\203\"\275m\3642\274(`\007=f\n\272\273a6h;\330\025\017\2741\031`\274!\003\310\274\202\200i\275H{x<\237\256\305;\007\265\r;\032/\262;<;\365\2736 
z\273\243\341-\275\034ri\274\315M\027\275\\<\317\274\'\343\234\274\354?\004=\356\260(\274\242\332\2509)\311\307<{T\001\275\025\301R\274%\341\306<\330.\216<\212\322\346\274\320\302m\275!\3132=\267\340\003\275\312\275\317\2740\236g;*\000\252\2759\316t\273\257A\016>\233A\313\274\037\247\250\274\342\354e\273\373\367N<>&\007\273N}+=\220\322+<O\377\366\274\341\354a;\024G#\274(\036\331\274\313\353\007<go\351<Pn\232=\365\262\366\2745\310\245\274\355\323\007\275\2771\310<Wg\255\273\020\207M\275\337k\033\2758\241\232\270\351GX=O;/<\215\271\r\275\265\256\320\274&A\222=h\235]\273\261\244\351\273\006$\210<D\252\307:\321\2328<T\272I<On\211<:q\273<\304\201\367<6\017\324;\352\r\222<\261\365\t\275\256\3627<\265\213\334\274\376\302\371;O\344N\275\242\214\301<\267D\324;\200\231\212\2743,~\274\320\244\263<A;\326\273\\U\272<:\244\374\274\222\213`:\024jX\275\332\363\005<\206\244$\275\200C\323<\346\313W\274or\207=\206E\221\274\247 ?\275\t\206\035=!R\247\274n\241\204\273F\235\347\274%\337:\275\355\275T\275|A\325\274\230k\031=Ry\025;\002\302l\275\212\374\025\275\300\301\222\274s\202\334;\246@\007\275\017\\\n\275\2011Y\274\247/H<\367\264\251\274D\t\213<\027E\036>\307\317\317;T,p\274m\377\300<P\336!\274\242IR\274\335\363\224\272Q\2462=\375*D:\250\337s\275\tX\"\274\243\204-\274\003.;=\247A\224=\265U\202\274]\235s=1\274\207\272\001\323\213<l\277\205:\257\316\243<=\235d=\235\207\221\274{:\201\274o\371\213\274\347/\306<\327\247m<r\3014=$\331\323\274\374\210m<=~\332;x&\227\274,\274N<\252\253\354\274ae#\275\025b\022;#w\206\274\006&\251\274W\324\216<E\344!\275\364q\003\2742U\027<\037\350\263\274\225\315Q\274J\212h\275\331\344\211=\206\352\354<\372\243\222<\364D\212\275\376\312!=x\312\324\274\312\365\271<T!P\274\374:\303\274h\020\205;\001\351\000=&\r\256<\365\215\252\273\022\244\305;\310\r];\241~\210:\177\267\020\274\261\2307\274\366f\223\274\225\373\030\275\017+.\275\210\'\332<\237\016\330\274;\n\246\272\235f\222<\224\275\237\274m\347\014\275[\316\211=\206\013\210<t6\230\274\210\375\006\275\322\016\367<\006\036/\275v\360\026=Q\273\311\274\234\220\244\271\340r,\274\025\344\267<\362\251\371\274\300\202w\2747\251\010\275d\366h\274\006\035\317<\025\213.\271\355\'\025<\252\342*:\207ib\274>iF\274C\357\001=\226\257\233<bFU\274\242\033\021\272A^\373\274\372\326p\274-\235f<\226[t\275\001^\241\273N1\000=hb\010\2734\321\271\274Y\300\270<\200{ 
[Escaped-octal binary file contents omitted — serialized float tensor data from the diff, not human-readable.]
eA\275ka\371\272\272\312u\273\005C\214\274w\302+\274\364]\356\274\361\n5\273\317\025y\274\324>\034<\221\302B\2741\300\277\274i\253\337\273\357\206\277<*$Y;\251f\263\274\327\267\343\274\351!;<_\356\201<\375\237\321:\221L\223\274*Q\277\273\267C\037\273;s\373<Z\224\374\274\021S\374;\344V@\273\270\316\303<*Lb\274\3059\376\271\034\036\365\274\2403/<\314oh;\255\345T<\320\007\337\274W\035H\274f\317\003\274\tf\364\274\263i\226\274\240\320B=\317\367\354\274\343\037\300\273\254\024D\273\014\2047<<\363\314\273H\371\n\274\231\343k;\344\201\006<q\241N\274\236\241\024;\027l\343;R\034\373<\211\327K=\253`\321:\022\214\201\275Ht\371\273\271\253\270\2749{\234\274B\025\205\274\2174\035\2743\'\270;\003(Y<K|E;\003\036\241\274\364\335\265;0R\266:\3010\025\275\321\273/<\326,:<\352\364\263\272\244\010\230<D\005\241\275\303ZP\275\\\356\263=\357\2003=[\271\346\274\013\255G<n\213;\275\327\353\027\275\010o\022\275\320\035o=\315b\006=3$\236\273\312\264\n\274\302\306\266;\321l\020<d\274\001<\2126\202;x`U\274|V\034\275\\x\245\273\276]<\275\265{\243;\n\214[\272\335\333\222\273\314\030\022\274\236r\355;\272\001P<\357X\264\273Ll\231<\226\202F\275\017^#\274\0211\211\274\023\343\210\274\307\305;<w\t\217<\220Q\370<\002\213$\274\nr\027\275\314\034\277\274\356\314\331\273\356e\352<?)y\275@\355\303\273\334\323B\275\263\020\222\275S\037\006\275\357\273\246\274z\363\366\275s\226\351\274\322.\272\273\263\005.<\357\200\216\274L\255J\275\345\352\277<\360\364\344\274iB\261\275J,H\274\220\265\006<\024\210J<2\211`<\363\375\202<_\203\211\275\026\312\266<GU\032=\261>\"\274\364\3274\275\230\371\224\274\307;m\274\254\271&\275\035\331\005\275\014\026\000<2\376w<\007\324\n=\236\177L<P\336\t\275\357}\330\274\317\216r\275\\%\255;!z\364\274\334\367f<\227\217\263\274\363m\243\274\375\177\320\274\005\026\262;t\377>\274)\306U=\347\214\364\274\356e\031=\356\343&=\227\203\312\273\315o\324\274\273\251\363<>\007\034\274\220\341z<\322\374\222;\252\371&\274\007\033\036=J\272\200\275\355A\254\274\357\354\262;\276hS<\357,\212<-T\311\274T\324>\274\034\006v<\014\303\032=\363\020\241\274\206\335\003\272\036\240\031=G\034\032\274GT$<\201\023%\275\320@\253\273\370\200\202=\342\375\010\273]\200\034\275J\322><\026\367\253\275\004\014w\274\273!=\275\371D&\274\201_k<J@\305;m\270\323;\215\367\316\273\016\305\230<\321Z\022\275\000.\340<\272\211\342;\013\207\245<\003\000\255\273\020\366\224\274\t\237\010\2754\035_\274\251\270\177<\330\024\"\274\265\206\014\275\312U\363;\347y\265\274/7\324\274\361N\r\275\342\003\311:\324\251\2638i\336\031<g,\276<\257~>\274\276f\t=\2141\210<\200\024U\275\342\377\t=\227\376\211<\300\327\007\275\247\205\007\273\0314\036=\006\247\331<\337\266\247<\243\'\r\275M\021w=\"\321>\274\306m\023\274i\313\304<\022e\013\272\360\353\255\275\306\370\265\273j\323\201\274~\260\034\275\234\277\274\274UJ\230<<\303\205\275\214r\014;\375uQ\274b\036\032<p\374\300<\303\205=\274\220#=\275\257\002\220={&\016\275\270\315\204\274yi\273\274\373\307\362;\025 
w<`\367\307=\245\265\353</2H<Q\313\031=\326;\202\274\241.F=\260\240\002<w\t\274:/\262T<\210$4\272l\334\217<\205\341\320\274\260\264j=\365\000\020\275\337!@\274,#\334\273^\310*\274\250/\032\274\230W\034;\215\234\252\275N\251\001<\376\300\203\275M|\326<\354\236C;XO\323\274P<\341\274v\327d<Q]\235\274\207\014\323\273w\241\036\275\374s\331;\315[\221\273\010\346D\274@p\212;\3445\371;\362\020U\275\206\254D\272\256Ps<a*<<\244n\252<\302\266`=\001\334C\274\203\343\226\274&w\210\274<`\223\274\216F\247\274\317\240c<\224uO\274\013o\306<\230\343\370\271\326k\355\272$\315\315\274\031I\230\274\357\367\020\275`i\036\2744,\200;\241b\330<\020\214\t\275\314\336>\275\350\275\270\274N\017\235<\225\213T\273\371\324\024<\356\350\340;T\027\"<\333N\006\274\2468V<\377\217,\274\2365_<\327\002\004=\371s%<\313\2005;}\203\206:\204\0331<C\377\311<\274%\360;N\310\247:\314?\362<\354\260\211\274\267\2018\275\237\003\r=\241\024f<@!)<\037\205\032\2754\377\t\275S\231\005\275\322\264\343\274\255\243\203\275\231\235\320<\274\"b\274\331\271\304\274:7\335\274\274\233\345:F\3025\275\357\371\326\274S\3608\275\n{.=\000\223B\274\331F\316\273\307U\225\274\343\231\251;8\t\326\275`\335*\275\303E.\274d\313F\275x=\"=\340\234\234<Qy-\274\007_-\275\234\250\262<\346l\204<\250\013\244<\013\332P<\331\007\215\274\236\227\223\2740\245<\274\313\344\375<\221\363\010<\035\344\206\274h^\350<\026\367\020\273\374\035\225\275\213}\366<H\234\315;\254\r\231\274\265\345\204<\347D\375\274\275\341\254\274r\364\222\274\277\034\031\275\363o\200\275\020\373\216\275\221\321\325<D(\353\274\311\263\306\274\243\374\007\273\251\252\301\275\354\303\265\273\'\273p;E\242O\275\275S\027\275\367@J=\251\250&\275\334P\013\274p\330P<HjU<9\324\2058L\270\245\274\3100\211\274BL\333<\211\315\252<\252\324\226:\206\216\275<\335\240\014\275\350v\250\273Z$\210\275?O+=\037F=\275\366%\006\274l\203\330\274\343\004\002<j\321\005\275\035C\256<3\274|\274T)\205\273QO\222\275\213\320\227\275\021\231\006\275\r\001\003\275Rq2\275\206$f=\334\335\032<\237\352b\275:B\365<LQ\033\275$\302\276\274\347\233\004;\323R\204\274\322>l\275J\373\021\274\034\177\027\275\254\200e<Z\206\020=\253\3576\274\250z\225<M\301\026=\302mG\273\206S\265<\351?\250\274\000\330>\273&\213\265\272S\007\243\275R\000.\275?\351\212<\311\0012=\350\005\216\274\3707\023\274\220yR\275\305g\212\275\2759{\274]\211G\275I\002Q\275\241\2008\274\333B\320\274Q\305\217:\361o\257</\361\363<\257qL=\030\205\227\275.\2706\275\207E\257\274\014\220j\274zS\002\275V\031?\274\210\3565\273\254\244+\274t\375\324\274~\240\246\274_\"\373;#\020:\275u\030\272\273aX\226<pT\377:\306A\314\274\301<e<\2004\342\274\364\216/\274\202\354-\275\001Je\273\361\002\036\272-:\237\274%\273\201<\200\346\220<\247K\327<\350<=\2747\0109\275|\'\251\272\365\020\331\274\215v\237\274\032\232\037<\021\217\245\274\273\260\222:\201\031:\274a\221J\2750\013^\275\274\215\321\274\364\303/\274\022{~\274\256 l\274E`\207\274\353\341\215\274\364\027\305\274\241\203K\272\031K#\273\245?*\275\376Mn\273&\273S\275\014p\211;\002c$\275\202\271\341\274\203]&\274<H\212\275\344\253\307\274\265 
\200;\370\352U\275X\275\364<\022m\267<\347\236\205=\224\035\262<O\315K\275\231\343\211\274<-\317\274\214\207r\275\262\255l;\240e\344;\255E\310;5q\230<\337d\352\273r\324\351\273wD==Sx\014\275\022\001\313\274\372\225\322\274\366/\260=L\315\271\272\014\354\314:\320\231\336\274\027\207\350<\031\333\316;I\346\021<\226|\342<\337R\363\274\006g\001\2750+\211<\024\322\220\274mz;\273\321>O=\334\000~:5WI\274\252|\202\275\265.,\275Z\037\036\275\205\240\2629\231\227\226<\356z\t<\214\272\266\274\025v\033\275\225\360V\275\3050\000<\342NN<\233\254\005\275\177\030\212\273\023\322K\275p81=dr\020<\334!\002<*\333\035\275\007\324\205<)\326l\274\206\232\241\274\350i^\272#\350\'<\227U\270\275K\004\033\274E\256\377:;\307$\275\363)\311\274\351&\211<\367\026\314\2740n<<\177\177\215\275[\315W\275-\273\'\275\256Y\215;\332E|\274W2\005<\024\276\210</\007L<5\320[\275\347[K=d,\261<\244\027x\275\013!7=\227\371\n=\251\000$\274\3659\201=\362\241\317;\355z\232<\313\031-\275\013\370\234<\005\255;;8\222+\274VpT\275s\223\226\274\267\201\343;\034u\330\274\246.\254\273.\345\020=\000\'\020\275\205n\222\274\357w\202\275\014\016\023\275\210\247\030\274\277&o<[\021\326\274\363@\004\275\221\315\303;\035\361\242:\303\365=\274\226JQ\274\221#\210\272\014J\376<\303;\350<\022\030\301<#\025\255\274\256\205\341\274\225\207\234\274\220\223f\275\003\371\353\274,L\023\274\316*\376<\230\250\n\275]\260l<\363\354\014\275\336\213\n;\371\212\233<(\017\n\274@\265R;W\276\254<\215%\007\274\252Z\253; \220\254\2735\266+=\3704\342\274\335OR\275Z\377e\273\257\367t\275\371\336$=\306 \215<\036\372\027\274\360\024\323\273\213\372\036\274(\007`<\217\037\256\275\321\246,\273\300>\325\274\355l\244\274\233\217\311\273\316c\037\274\342\277\347;\370\022\303:M7\341\274\225\256\322<\314\357 \275\333h\312\274[HK\272\345\362\266<\363?!\274\016]\005\274\203\301\244\274(\273\350;\221E\223\275\326\230n\275}$\177<\016\343\205\274H\252\262\274]i\266\274\245\315\370<{i\307:V\233b;\267\\\014<\227\021\276<\272\367\366\273z\326g\274\252\343\245\273.\360Y<M\035\255\275$\246\223\274z\036$;\332\035\246<\266\t\355\273To\206<\203$\324\274\275\314|\274\357\2452=\355\355\016\274\215T\375\271\312\230\'\275R\254\212\274kh\010<\247\377\035\275\352\277\006<C\301\256<P\032c\275\205O\306\274r4\000<v\341(\275\201\t\210<\207g\222<aV\217<P\356\340;\346am\274\254H\000\275Z&\240\274K\030%\275(k\255\273ul\224=W3\227\274\021\305\022\275\247p.=)\221J=D\261.\275\376\226$=\347\237\023=\374PX;\202?\342\274:\036\321<\031\257k\273LT\376;&\220\267\274\003\227\204<,\313L\274\213\004\r\274n\217\t=\231\357\260\273\300\035\230</A\305<\260\200Y\273\266\300c<F\261\016\275t\202\';\260\266\223=o\274\360\274\001w\265\274l\311\371;\367\026\t<=\315\236;\000v\035\275\242\254T\274Y\004\206\275e\265\221\275\333\345\312\274]\265\272\274\025\303G<\246\371\007\273+\370\320<A\347\253\274\312\346\274\274\371\265p=\314\224\227;S\t\340\274\024{d\274\2611\202<\373\242\305<\326\275d<xZ#=~<\2719V\233<\272|\373\200;\321yz\274\rY\237\275C\326\237<\022\025\'\274\2752\014<&7\216\274\343H\373\274#Ot=J>\245<\t\205\027<r\251&=\017\315\212\274\210\342\336<\215\376\327\273\2046\262<D\360\025\275!\032<\275H\313V\274\266\350\212;\277\233\206:z\233\002\274\366\376\005\275ZaG<\363\'\034=\350\252W\274\241rs\275\307\307z;%B?\275\326S\325:\303!\363\273\205\005\223=\216W\345<qE\247\274\0049\225:\220\000\025\275\330\252\245\274\370D\377\274\365\300!=\356\327\016\275\240&>\275\227\214\311<\025j\036\273\301g\247;5\371L<\205\373\340<N\217\\=\302\200\253;\300i\335\274\230\265\310\274\226\027\033\275\007\2652<\350G\023;\263\302\266<\320\256K\275\034\2
40\351;hM\334<\031\247\256\274\303\223\010;\354\372\303\274\3759`<\304\234\355\274\342\347 \275B\351%\272\254\"\266:\005\017\220\274q\244$=+\367\335\274\251\331Q\275\357\247!=+\355\005=\021\310\257;\227\004%=n\177\250<\340\240\215;\326\2004\275\314\367\331\273Mjd\272\371\024<<R\216\224<xm\023\275\331\024\357\274\377\326\333\274\245*\227\275\275\242\362\274b\3567\274B\374H\274\315.\370\273\332\245S<C\225Z\275\020\242G<\372\254\313\274\303\366.\274T\276\024\274W\375{<\275\247f<\247g\346<\351\320\267\273\223\221\254<\233\332\210<\252\257\r\275\363\t\344: \334\014\274\307~_\274g\371m\273G\361\262;\2544\273\274\313\264\326;\2403\013\275\326\273\034\275\025q\200\275Sc\020\275G*\344\2719[p\273\305\366?\273b\234\004\275\223\177\223<\260\3128\275^@\374<\234\354\010=\004\352\005\275;\001\252;\347\242^\274RP_9\035\035\177\275\373\035s\274\031C\264\273e J\273\333\367\205\274\202\010\024\272\200\362\216<\356\270\250\272\332\337\365<Wf\027\274K@P\275\250\315\034=\003\2524<\355\022\020<\277\333\340\274$+I\275\376{;\275\276\234A\274-\206\350\2748\353B=\2754\301\273*\222\2149\236eZ\273\'\310]<l\211\n=.]\230\275d \203\274r0\276<\025\235\204;\264\351D\273\200~\323\274\206\242\022<`k\364:U?\267<\0035\323<\311zq<T\337\013\274f\337\316\274\310\313\331;U\322\306<\343\221\256\274\314\350,\274\"\360\007\275\227\207\025=\0364\005=\355_\214\274\274\036B\273\270\333\356\274\371\353\211<\270\350.\275\3530\352<\177\301\242;)2\325\274ir\343<^s\303\274z\364\205\273+\007\\\275d\243\022\274#[y\274\271\276\330:1\'/\274V\211\343\274\214+\234\274\276\211\370;$\342\320=\210\336G=t\276\216\274\010E\306\274O\0242\274\307\203d\274\327\352\300\275g\023\002\275\376\343\205\274\262Ee\275\326\033J\275\225\230\346<\034\242\036\275\274\232)\275\3057\016<\"\007o;\305\330\261<b\3462\273\025\261\t=\273\261B\275\355=\225\274\234\323\265\274\324\230J\273*SK<<nC:\016\276\006\275\262\023&=\262\245\003=\236\251\005\274M\275g=h\031h:8\260\222\274\314\221\243;L\267\016:{\030u< \024\017\273\256D\202\274d\271\r\275\311\257\214\274|\302\361\274\375\2634=5\2603=f\326\243\274o\032\300;F\206d\272\036^\001=7\361\210\274\'\036\210\275\016f\025\274\254\022\376\274w\346H\274\225\002\366\274\022\222\035\275\017e\002\275\375?%\274l\277X<@\355\266\273\270:_\275\220hB=\257\272\014\275\0038\344;\336\270\000\275i7_<W\221\225\274\000\274\251<rO\003=\034\273\270<<~\020\275eBs\274:w\234;\272|\231\275\240\030(=v\350\024<\364\305\211\273\363\004)\274\312\037q;\337r\324\2721Da\274;\021{=\241\0075\275\035\247\246\272\361\365\202\274G\233\303<\030\261\202\272\037a\277\272\210\205\374;\245}\364\272\274\247\013\275\310\375\216\274\255\247\244\274\270\206\317\273\351\2409\274A\201\205<N\351\207\274\221\317\331\274\253\327$\274,\216\200\274\254\244>\275\314\001\005\275 
\036\031\271-\334\335;-\363\261\274\315\224\346\274\374\374B\275\321^R\274\372\005\251<-gi\2751\244Q=\\\244\001\275\353\370s\274xZ\244\274|\037\030;\233+\357\273\306\253\207\275\306\371q\2721Z\027\273\277s{<3\354!\275\235\200\327;\2258\014=\245@\321\273c\336\241;\364\312\002\275\331\321\'\272\211\\\000\275\335\225\211<\214\332\305<O\371F\27519B\275Y\245\222\275\352\374l\274\367\225\317\274\230\320M;]\361\226\274\374\020\205<\177\311\220\274\0144\001\275re\374;\372MX<\353u\313<\3367]\274\371\0148\274\301\355\377\274\304\353q\273\346^\237\274V\017\r\275YL\262\273\341\276\\<\310C\021=\342/<\275\351\366\277\274{\352)=up\347<\343\253\337\274L\210\031\273>\255b\275-\026E\274\000\350\237\273gP\271<j\005\347<\320.e\275\226\002\235\274G\307\003=6\340\034\274\2045\372\273\\_\217\274@L\251;>\215\224\274\364\022J\2743\010s<\2325f\274\324\005\267<\270\233\021=\273Kp\274\330\237g;`\030\236<<\350s=\371\213\025\275\037\360\222\275i\224\212<S\361\001=\rUl=og\355\273\254\253u<E\226\332\274\303\331\223\274\037\352\325;\376s\244\274\346\344\307<A\222\255;L\322\005\275\017\374W<\323!\\<\032_\217;\024h\276\274\337[\033=\277b\t=\205\274R\274d#\223<m\212f:9\374\347\273+\007\350;+]\177<\303w+\274\235\263\355:E\325u;(<.=\366\3577=k\000\267\274\te\234\273\035\2725\274\020\207;\273\260\276\340\274\202\023\363\274\325\326\372;\253\0160\275\006;:=\032\275[\275V`\351\274g\241\244\274\274|\024=\340]\236\273c\274Q;\255\343 <\255\364>\2753\r\244\2740\237\230\274\315\221\252\274\351\306X\273\356J\026\274\230B&=\010t\257<\221\232\264;y; \275\232H\256\274\353\300b\274}\024\002\274W\204d\274\302s\361;\025Q\364\274&\227/\274H\361n\275\343\232\340\270a\371e\274$*c\274\356\347\316;\365\313\030<\250\242\000\275\"Q\334\274\007\206\024\274\266\273B\274\277\227D\272\266\301\036\274\237\302H\275q\223\205:`\322\032<\005-\253<\016\356\333<\361]\362\274\257\267\212;\353\373\312\274M-\341;\3372\341\273\304\020\326<\370\234\351\274\207/J\275\334){\274\023\330\023=\220\323X=\235\334\304\274\035>\031=\271{\205\274*\371\016:\333\263w\274\247AI\274\243\231V=|\372+=e\263\240=\273\221\232\274\336\371D\275\001w9;\272Nx\274,\247v<\262\030K=\327\001m;\247.c<\347\346Q=\247\010\243<H\331O\267 
\325I\274>j\200<N-\201=\277y\347<\362R\022\275Q\231<\275\225xI\273\374\307\010<\376\346\316<\246\300\262;~\214\324\274\001\025\371<%7\236<X\222\253=\245N,\275\364\n\266\273\344f?\274\177\340\344\274\232\001z\273TX\343\274\203\\\334:M\262\004\274w\013\353\273\306\305\363\274\327/h=\360!+=\300\014\315;\n\025\251\274Gi\006\275\014\330\\=g\3260\274\217\320\013=\3513I=\256\260\205\274\365\260\362\274\354<@=n^\331\273\201\374\031=\306\352\025=\2112\177<j\314\271\274T\010\t\275\350\260\246:\2468\220=\0301\313\274\'\003\001\274\334\022\266<\267h\"=r\330\030=\021\021\201<Y\330]=7\362\007\272=Zs\2742W-\273j(\225\274i\247\375;\336\224\226<0\233\312\274(\t><\255\360\027=l\326\231<S50\273\270\033k<\311o\260\274\223\362\035=\334\353\204:\203)3\275W$o\274\250f\210<\201\316\017\274q>6\275\316;\036\274k\364\277;a\005\032\275\010k\314\2746\375~\274\273\023\264\273k\216\032\274jZ\034\275\236t\035\275\357z\013\275]\270\310\274(\352\237\274\227o\353\2745\330\274\274\342T==/]\256;&yK\275\262\034\214\274\211\261?;\342\326\205\272?\351\225\274\313z6\274D\361N\273\337\207\017<\177\227\016\275\323\344\256<f\271h\274\210\362\355<X\253~\273c\276}\273\376b\010\275\352\214\307\274\321\r\205<\300L\212\274W\372T\273\020\210\242<g\t*\275M\365=\273\277\321C\275\231\370\200<\300\300}\274\370,)\275\257\035\215\274\000\227B\275;\355\201\275\276H\317<\0326\231\274\264a(=Xv\024\275\214\371\260<\3001{<GR\366\274\354\3006<\336\001\327\274,z\310<\376\006\344\273\t.\222\274\275\314\204\274\024\n}\273\3546\\\275\021\233\023\275\323\372>\271L\003@\274f \205;\231n\214<\265\247\264;H,.=\255(\001\2752}\347\274\216\221\372<w\031\310<\336\202\211\274\312k\236<\024\037\330<hmO\275\271\354\216\274\316\316\005\275Sn\203<P[\263;u\010\343\273\372\351\242=\257\256\220;\233\337d\274\272\327\354\274>\n\332<\310R\021\275\342e\222\272\033\016\000\275+>_\274M\303\232\274\006k\247<\257h\t\275V\356\216\274\3630$\273M\002\272<\026j\314\274\330+I<\351\354\202\274Y\014w;(\314\273\274\023r\204\274\237>\363\274j\334d\275vzX\272P\325\006\275p\324|9\335>\217\274k#\"\275:90\275\310\250\201=\337m1<~\305=<\305\221\231\273W\237J=\334Y\"\275\226\272\273\273\024?E\275\336)V=\002O)\274\213\003\255\274\330\224\346\2746\342\016=x\3254\273OF\023\273\313\t\321\2740\347\334\274\363l\000=G\351,:\230\014\006\275MO\256:\035\225]<\251jr\275~\326\260\272]\2645<+\250\257\274\245V\n<J\032\223:\230\355\003=^\256}=\t\326\300\2744\t;=\216\217\323\273\007!\203<^r\242<\002l\230=\377\263\270\274e\331!\275Z\363\222\274F\257\360\273\265\"\007=\350\341\251\274:VB<\362\020\223\2741\026C:\252\003H\274\016\232\023\275w\327\217\274#\244-\2748\371\337\274\237\\c\275|\224\307\274\200kC=1\3478\275\3701\312<\202d\326\274\324\224\327<X\007u=gQ\371\273\333\207n\274j\301\204=0\220\022\274\023\356-<\325\200\005\274\314\005p\274\372\nc;\2673O\275\267l\221\274\217\317a<\006\305\"\271\220\025\037\275\350\340\240\274\"e\353\271c?x;g\225\231\274~\367\374;K\334W\275\254\034Q\274\021m\314\274\376$6\273\n\373_\275y\005\250\273\3525H=uxz\274V\307\344\274!\021\010<\337\370*\274`L\320\272\203\301\033;\034\226\271\273k\233\202\274\210\266\367\274\313?8;-+\375:|\026#\275\023\312.<6,\265\275\336\032\001\275``.\275=`r=n \331\273\363b\267<k\253\247\274\241\373\340\274d@L={\244}=Z\277\230\2731\266e\275\3047\205\2734\024\t\274\372\267\3579\323\325\211\274\251\371\305<\246\217\241= 
9\014\275\257\000\257\274$(G\275\235\302\"\275&\250\212<VB\006=\347\360)\275w\262!<\311\245\260\274\226\357\001=\306\230\207<r\257\337<\004\270\244;(\344R\275\200\226\253<\231\342\355<\035\263\216=\354=\222\274B\204\207\274\n\314\364\274}?X=\353\265.\274\016\036\247\275p\314\363\274\032\207\254\273\203\207\277\274B\347\331;\301\333\212\273\221k\372<\177\377\014=\262\317\361<d\033\332\2741\322\302\273H?\213\274q7\017=;\037F;\245\332\026\274\366\343\017<\345\207\007\274\270>\273\274\267j\343<\314\030\r=\216\272\213\274\025o\237<i\346\321\274\007\r/\275f\234v<\004\212S\274K\274\233\274\341\234\331\274\225\226\333<\376\036z=\330\231\013\274\275\013\327<\360{\304\274\201FH=;\367\t\274N\021\337\274\244\366\311\274C\334C\274?\241\214<\224\303O\272\251s\030\273\027SI;\342\\$=\332\346\310\274f\317}=\330\022\205;\266tF=\371f,\275\010\0010;\211,\310<\243\355/=\216\376[\274\234|N\274\271\025\005\274\275J\216\274\221X\r<]\304v=J3\'<\223Q\312<\335\350\303\274n$\007=\273\354\360\274\'\222\371\274\262\252 \275\263\303\367<\027\340\253;\274w\220\274wh\017\275&\344\340\27382\"\274\357\215\203\273x\257\240\273|\244*\275\364\033\002\275*\206P\275\214|\r\273\355\311\033\275\255\337/<\252EX\274\213\264-\275yr\037\274\270N\030\275(N\256\274\362\236\311\274\322\'\027<\372\226f<Ro?\272\224\312\342\273\255|\037\275\323\'\t\275\216\362\266<kl\212=\216\234<\275n\333\256<\322\326\256\274\312\351\004\275\301g\244<\232\204\254\274.2F=\352\014@=\363\335\355\273E\262\001\274\322i\020;\242\364k\274U=\320:\260\323\244=\222\313\211\275{\200\221\274\225\370\242<\252a\025\274\243\362$\275\232e\326\274\217\025\273<\3747\r<\316ZL=\365\230\345<dH\235<,4/\275\342\370\362<\000\315_\273\020$\267=\213\021<\274\232y\210\274\304?k<\235\2679\274\233,2\275k\344\372;}/k:)tJ=\323\010\313\274_,\310;\306\262\257<\235ZC< \364\033<\215(\177=%\360\001\275{\334\364\274\232\233\032=\244\330\020\273\372\377\237\274B\223,<\017\023\203\274n\373\351\274k\301\241<A\322&\275\000\000Z\274\t\376N\273\204\220\217\274\305\233\242\275\2169\321;i\277\r\275\312\261\271\272\257\346\014=\232L\204\272\037\227\226\275\375\006\301<z\353\355\274\247r\233<s\017\277</\016\377<\025\201\215=X\221\n=\010X\224:\023\230\274\274\375\323\375<l\237<<Zf\204=$\337\212<\302Di\274I\262\236=#\312N<\325\212\331\274\226L\301<\030\333\225\272>\325\200<\0019\031=|\021\\\274\375\245\207\275%\271\306;0\370f\274\014\257v;kC\345:\347\357\234\274\233\247\244<\362\365c\273W\333\330<\276\204\n<X]\021<\346\t|;4\320\004\274Qj\366<\031f\250<180<1\230m\274.s\375:\355\022\267\2723!\202<6H\0009\227S\036:U\222x<gBN<\356\236\214\274\220-\311\274\247\300\351\273\373\352\016\275\000K\226\273Z\362\035\274\032\275\253\274\030L\035\274\237\213\313\274i\317\024\275p\306\204:\315\260\266\274\035\202\030<\004\310-\274\225\350\r=<\213\231\274\032B\032<~\016\031\275\357 \336\274]\335\262;\334\020-=\375bw\273\376\373\032<\346=\366\274F\202\014\275]O2=_z\341<u\370\305;\224\247Q\273+\235l\274A\2129<yKt\273<\207\201<[k\210\272T\263\000\275)\304\270<\274\340\265\274\361\204\t\275`\205\206\274\334\343\227\274\373\217\366<\217\230\240\273r\343\017=id)\275,\222?\274\\\243\310<M\336\326\274\220\003\344\274\245\366\265\2744\354\216=\002\312\034<\236\200\374\274N 
\206\273\352}U\272\300\261\005<\\\237\265\274\261(\001=\032\177\307\274C0\303\273;J\362\274\273\267\264\274\316\260f\275x\036\030\274\240\322\245\274\223\361\256<\375n\030\273\245;\300\274\214a\346;\014\346\t\274G\222\205<\312_\230\272l}?\274\317\031\014\274\200\372c\274ti6<Y\370{\275\375y\307=\346_j\272\336\220\372\273\203\'\026\274\366\311x<\262\222\234<\035V\030\275_\316\253\274\275\020<:\034\376\256\274\242\000\326<:@\210\274\365\023\320\273a\0050\274\036\234#\275\240\254i\274\374\261{<\320\001\003\275\2549\272\274\3733\256\273Jf\t\274\255k!\275\265Z\263\274k\303i\274c\310\231\274\233T\255<Op\210\274\002#[\272w\223\323\273\204#\t=\215\363\207\273\320\017\237\275\031\3217\274\023\317\346\274q#\302\274\210\220\026=\355\216\305\273\002\241\344<\276\260\241<\375;X\273\231\314\256<Fc\272\274\206\261\036\274\325\333K<a0\035<\322f\322;m|\340\274\316l\310<\307\331\265\274d&k\273cb\001<\303{\001=40\246:\322\346\007\275\340\030(\274P\031;\273\370\0058=\026\253\241:n\'\223\274\234\255\200\274\276f\265<c\274\304<\273\027t\274\302\027\236=N\t;<\270\204k<-\0370\274\306\225\023=\216D\226<\350\255\274\274\205\332\033\2738\371\327;;\003\356\273T\023\257::u\023\275y\330*\274\261\025\211<\306\017*\274\316\305\265\274eJY<\346\334\311<\373D\255\274Vs\006=\263d\200;E\312\354<\256\322\'\273\202O\376;\2031\312<\r\377S;\376\303P=[x+=\366\226\372;\326[c\273\334\362\377;\253\234\000=\014\311\362\274\276\311\021<p)}\275\212\037+=X\206\374<\177\356\037\274\331\311!=\357,`<S5\347<\275(\264\274\311\273\302\274\032Z\004\274)\315\231\275\213p\375\273C\035O\274\363\207A\273\244\317\346<\300\223\262;>`\236<}\260E<\362\336q\275\266\260\007=\023@\236\274\317\005\225\274\177\233\006=\260\216\206\273!\257\367\274D\216=\274e\274\212\274\221>\007=\365\300\037\275\373\371\237\271\315\276\325\271\322\341\257=w\255\005=\370\323n=\246\234\035=\341\264D\274\206\r]\275\230U\242;\035\340;<+\3507<\023p\245\274h\234\360\274\301\253\340\274\271L\355:\r\323\264<3\307\315<<\261N\273Q[a\2749\351*\274\023AY<5$];\366\313\311\273V\006\362<0/\213\274\313\255\351;4\336\243\271\222\n\265\273=@(\274\357^=<(\327\222;?+\364\274M\362/\275\324\006b<K\222\256\273\215uC<x\200\010\275G\273\334<\375x\t\275\334\362\243\274)\004\226;[\313@\2747\034&\275\300~U<\250\315W\266\233i\231\274\251\037\366<\035\346h\273\241i\000=\024\023\020=\nh\014\275\264\300\270;\223j\257<\210\034\236\274x\252e=\354y\210\2757\370\336\273\036(\277;\371j\371\274\220\004\231;\024\356:\274\271\205\253<\357t\227:\263\254V\275=|\032\275\216DE;f\265(\274\252\013\235\271\321\001\026=\355\245\003\275\370\006\214\274\002G\000\275\017k\236<%=\307:\244\253\212<N\347t\274\200\301\361:\363TB=\"\265c<\253\321\r<b|6\274\207E<<k\263v\273\265Z\024\275i\367\2059\023T\266\273\350\335\302\272}\266\001\275\2636\271\274\032V\347<E\332\340\274C\0265=@\2372\275\311y\233\274-?\277\273c((\273\234\224\323;\314\006\275\273\267\364d\273\351B\371\274B\230\220<Q\357\211\274\201\226\344\274\227\005N<\010W\025\2724\\\326\274\326\272N\274A\372\243<\244w!\275\355C\001\275\366D\026\274\273\220\231\274\210\275M\275\277\254\205\275v\336\304;\252\347\343<\026\210\311\274\267+\"\273\341\371\250\274\254W!;\310\243\305\274\004\375t=\264\'`<\313\357\341\274-c|\273Du\3706\236xh;\337b\014=\303\274I\274FK\020\274\334p\337\274\323*\256;\241\007\257:\365\276\313<_\330\034<\246\244\025<uY\252\273\320\\`\274z\277\003\274cHL\274\244\314\013\275\231\275\204\274}p*\275\303f\300<\233\260h;\231P\023\274\302#\227\274\311\343\204\273O\000\006\275l0l<\313\3604<P\360%\273\"M\355;l\216@\275\0078N\275f\247l=`T\331\272.\223\374\274\270f5\27
5\256?\\\275\033/#\275\276\245\027\275|h\026\273\tD\334\274G\270\344\274\t\017\306;i[\204<>0\370<\302\302\022;\027\250%\274\334\320H\275=\261\275;\336H\373;\206\337\342;\230\0025=nEf;\022J@\274\227\265<\273>\2153<\201\010\t=\317\3169<U40\275\016\205<\275{1\337;\332\022\211\274^\326\347\272\316\020\303;\316b\271;\305~:<\272\257\254;\360P\306\274\\>\305\274D\232\267\274\244\023\232=c\001s<\232hI\274\331\211$\275\016\244\345\272\212)%\274E\3136\275\377\301\276\274\327\341\330\274|\356Q<\207\272F<>\272H\274\343\314y\275\326+\261<\224S\311\274\256\244 \275i\207\032\2750\351m;\225\352\305\274\020\333\036<\201\322\254<-\203\227\275V[\355<\000V\024=V*\004<`e\257\274\014\203/\275\303\271*\275=\306J\275\376\001\025\274\324\201\354\274\322FZ=D\306\343\271[\272\004=\260\030k;\345\274\375;\276\352\346\274B]\267\274\021Wg<4\377@<\245\341a\275J\360\273\274@w.\275;F\235\273\2068\277<\264\260\303<\246\332\302\274N\263\206<\177<\206<,\267\016\274\203\311\007\274\\\223\316\273a\254\361<U\245\251<\271\305e\274\352{D\274\373\002\213=&\243\206\274\340\033\256;\331D\244\273+\341<\274\232\000\025:i\021\r\273\250\203\':\322$\344\274zV@;\013\245\213\273h\303\032:g\315\357<\234fm\2745hn\2742\232\034\274\330Rb\273C\037\377<\220\341\376\273\r\207\324\272\251\013\363\274\367\010\031\275)\001#;\323\357\374\274\223\201F\274J\017\314;\201>\204=2:\333:\322\006\276\273O\t\341\273l\357@\272\315\t8<\366\331\212\274\240\261\240\274\032\265,\275?\254\223<\342\231\014\275{t\302\274\255\007\242<\201\217\034\275\327\372\214<I\365\336\272\245Q\357;\215\301\017\274aqX\274\326d\216\274;V\"=kQ\321;S0\223;(\337k:|\006\237<F|\027=\212\tP\275\277\026,<\324\206P\274\334\260\000\275]\272\206\274\261\321~;\350%\233\274Q\\:\274\217\233\212\275\025c\246\274\205\222\246\273\013\376)\273\374\254;<\004s==\254\356\364\274\250\226\003\274\242R <\033`\246\274\254\204\225;K\270d=(\230\354\273\021Ge<k\234\321\273\344\360\310\271:m:<\033\232^\272m}]\275\233@x<\323\225\033\275\272\261}\274}\254\357\274\314\000\t\274KT\206=\027yN=3\3734;\021\332\314<\333-\235<\261~\237</\327\370;\204\002\243<\t\357\334:\276\005\217\274\233>B\275\221nR\273A\251%:\035s\350:?\344\303\273s\341\274\274\332\267\206\274\301\370@=g\302\265\274qq\032:\270\246\302\274\260g\010;\220\214\210\274>N\003=!hh<t7\007\275\311\246><\013\374\256\274\317\375\247<U\341\234\273\322\333\304;\264\r\200\274W\245\022=\251\375\252\2741\211\300\274\224\202Z<@vf\275\n\317\201<9\t\237\274A\375\303\272\303\001m\274\350\210\324;\357K\216\272BeX\274\241F\206\274*\343q9\254\033\252;CF\331\274\240n\211\272\311\355\005;\374g\225<R]F<@ 
\304\274\326\275y:\000IM\275\255<\020\274\220\226\256\274\2643B\274$Ox\273\n\240J\274\262\3172<J*\241<\337\n]\274\036tj;4\211P=\355\n\004=z\\\270\273P\213\n;\370\036\220:\365*f<(\201\202\274\376\276\t\274\003\214\300;\017m]\273\243\032p\273\200:\376\272\3669\371\271\317\364\346\274\270L\221\272~8\023;\362-\207\274v\307\353<\235\342n\273\311\356\203<*1\035\275\004\\&<m>\033\2749\032<\275\352Q=\275\2450\2739\016\251D\275t\301\354\272Y\002\314\274\364+\375\273\031\352n<\305\340\262\273\243\266l=-\310\265;\312\220\005\274\375\315\315;\216\204P\2755\200k\274\027}\225\275z\025\312\274\274\035]\274\032\242\320\272\241,\222<\022\264\024\273\220\271%;r\3006\275\2128N\273\212\330\264;M\237\255<\313\2343\275\265\021r\274\330\224\357\274\232\333{=B\"\222;\346\244h\274\355P\347\274b\3136;Z\r$<\354R\212\275\235\364\262\274\2062\354\274\364\307\014\275~B5<\256,\006\275\242NP;8\377\373\274\255K\023\274\346A\256\275\354\367\204\275\273U\210<\271fX\275\335&G\2750\264\350;u\0060\275i9\366;B\211\350\271\027\027\010\275\255\353\253\274\t\001\244\274\025\376\312\273\033>\225\274h]P\274\363B\261<\374n\206<\200go<{1\374\274\030\332\272;\005]\342<\302\271@\275\020\370\022<!H\177\275\264\2057<t\216q\274\032\233\320\274\201\204\373\274?\340\007\271\245\037\336;\030*\362<\237\257Q\273^j\346<\217\350\177\274~\242\342\274Z\225l\275h\032@\275?~\366\274\032g\362<TZa\275\235PC\274\203b\002=2]\210\275\267\267)=\365\205W\275\302a\314\274^\342\203\274}\004\376\274\234.\023\275n$d;\275\271\034\275\363I\021= \2667=\212##=\225\2328<\267yA=\264\301\371<\347p0:\247? \274<\340:<\251U\200<\232tQ\275\277\203r\275\323\242\303</\017C\274\272\337\263<mV\240\274\275S]\275\267>\037\275X\370\351\273\243\324~\273\321\263!\275\026\342\251\274:\371\034\274\246\227#=\003M\352<#}\027<\263\332\301=K\362\210\275\276\353>\275/q\214\272\032%c\274z\347\304\274\254\307\210\273;\303\016\275r\004\243\274\314\200\230\274\200\2458:\347-\242:R\261\230\27499\233\274\014:\203\273\246\300P;\320\267\254<f9\352;\247\003\333\274\356-\254;c\227i\275\315_\3569g\323\265<Z\351\033\274=n\366<\236\360\264<\266\271\301<\223\320\266;SV0\275N\245\357\274(\346\207\275>\331\220\2742Wl\274k_\203\274\033\362\366:\245\2057\275\271Xw\274D\010\330\273\274@\007:\275\331g\274U*\002\275\366\374!\274\204\351o<H\005\370\27486\204\274\026 
\230\273\000]\266\273\257\314\271\274\350\243!<KW\024\275R%O\273%Ez\272\310mu<\347w\020\275\n\034\320\274E\357b\272\006\\@<u\245o\274\301]\207\273\302@c<{r\013=\021\\\216<\3268.\275\005\305L<\232@Y;G\216\212\275!\211\331;y\340+\274\355:\252\273%\313O\273\037\r\027\275lv\255;R\303r=\016\343\303\274\000\347\273\272(\302\024\275\315\010\375<\377>\212=v\022\321\274\\\001O\274\212(\315<\003\026,=\264\014#7\215\256\233\273V\231\341\274\205\345\230\274\302R\351\273\334\001\321\274\243\212\215;[\340*=U\321%=\027\315:\273S\244f\275\372\242\262<0\366\214\274\226\026\250\274[<\234;Fn\266\272X\016\201\274\333\000\213;\204\227v\275\321\276\021=@%R;\217\277o\2743-\"\275\347\337u<e\203\261\274\325\276\273\274(\315>\274\300\371\035\275;\377\332\274H=\341;\037p\307<\365Fa\274\tQ\262;\02470\275\260\305><f\375l;\026\254\031=\362\234\336\274\345\022\031<\243\0102;\224\357\376\274\017G\022\275\267v\000\275\002\020\314:UK\253\274\233\255\206\274\177Z\343<VB\201\273\222!\330\274e\274%\275\242\325\022=\205\277\200<\032R\355\274E\253\245=\231\315\207\274\347\331\203\273\033\310?=\016\033\200\273\212\022\027<(\031\037\272\215k\207=\260\276=<)\373\330\274y\237\003\275\340\010u<\273\314\"\274\351\035!\274\212\362\272\273\005\252\320<\230\003M\273/x\014\274\312\332\351\273\256\233\026\275\"k\371\274\n\255\013\274\244\351\270\274\231\334Q\274\'qv\274}\231\216:1A\317\274Xy%\273I\232\305\274G5\006<\021j\200:\332\370\243<@\255a<,\251\244;hct\274\246re\275\227\346\201\275f\205\201\272\336\004\246\274\352\005\301\274\235F\304\272Z\252\246;\242$\263:\231q\344<\242\377\260;\227\r\277\273\256v\215;\301\343a\274\345\244\355\274\330\263\004\271\322\311\367<W\003\021\274\273vX\275\310\217=;\264\250U\275H#\023<\230+\263<\343\244\310<\250\220R\275a\303\276\272\234w?=\022\233\n\275\334}\265<z\024\313;Q\000\010\275\373\021\t;D\371\t\275\2233\274\272\224/l\274s0U\273k|\240;\362\325v\275\321\264>\273\007\364f=\\\301\256=\033\036\250\273\244\265\035\273\002\013@\274Y\036\t\274`\364\274\2746\230\023\275B\214=\274\325\"\2779-\262b<\207\2178\274\341)\000=\310\376\330<i\002\363\2730\304\321\274]\245$\273;#\241:\211\033\261\2746\222\365\274\211\334Q=`\tb\275y\310J<Qe%<\377\000.\272\371\345\307<\\\372N<\006G\247<\320\362\246\273\355S]<\264\'\317\274]\3657;\325\006C\273\205\313\276\274\260A<<M3\265\274vE\037=\365\360\252<\336uD\275X\346\266\274>\235\314;\335~\355<\003\317\362<\"L\022<\301(\251\273\232\306\371<\276\013f\275\023\325\024<\r\033\363\274\301yM\275\377\263\263<\264\254!=W\321\235;R\021\037\275\032\357\307<Yz\204;\300\001\333\274k\302\360;}o\206<\206\317:<\206\252G=\265\247l<\201`#\275{\333\032<\324IW\275\014!\342\273\222\017\360\272\353W|<\212\024\"=\006&\276\273\r\373\356\272Cq\220\274{\"q\274\200\314\272\2745]\360\274`\036\356\273\277R-=aX\246=\235\033\023\275q%\327\274}q\026\274\372\014\250<a\030\017\275\3405\244<\026\267\021\275\304^)\275O\006\032\274\321C\223\273g%\231\274\355$\211\274Gh9<-\207\361\273\330(]:s{\255\272\305\303\344\273\273\260\317;\336\241\334<\030+z=\245\323\031\274\330m6=\251\375\345</sk:\305\371\221<\225l,<z\214\214\274\265\347\366\273\t\201\225<\220\020=\274u\017\262;\267\263\375;\272\020n;\214^\024\275,_3=\260Q\020=\332\3667\273\337Q\300\274\204@\'\275\000\370s;O\3649\275\362\337\235:\341j\025\275\353\275\376;\234\233\010<z\217\030;\241&\272<+C\340<\302\277\233=\217\233\016=\315x}\274\330\337@\275?\324\307\273\"T\256\274\255]\345<\377YK<\0264w=c\2615\273/+\375;\325}><\362\242[\274\304\233\002\275G\361K\275,\230\267<ai\022<Mi8\275\301\257\215<\twe\274i\303\207<d)\206\273\204\372Q\275\211\032)<I\332`\275\352\t
\203\273\024\224\261;\332b\203\275\302RB<\205\210y\275\237\021%=\314\351\342\274\037\311\n\274\260P\273\274\036\ti<\247u\263<\344n\005\274\202\336`\273$V\347\274\237L\005\275\262\342\266;\361T-\274+\330\360\273Y^\301\273r\303\274\274\004]\013\275\010\223`;<k\000=\tU\014=\346O\t=\220\010\235<\300D\361\273\331\026\002\275:\300@\273\200m4\274u\353\307<>Q\255<D%\356;\027\207P\273C$\037\275\212\247\206<\0376\244\274\036\355>\275\370\023\036\272E\303\233\274\"\022\204\2736\303\300\274\366\241\226;\214\010W;\313=\335\274\255\265\336\273JbL\274\327Y0\274\360c_;$SK\274\340\301\251;\362\242\237;|\204\017\270\375\214\037\275k\013\347\274\"9t\274\243\033\332;\242y\006\273\211#\373\273\035\346W\274\225\236m;\360\355T=lHF;\243\323\367;\230.\3509\373\036\233\273D\216\303\272\373\232=\275\311P\307\274G\363\202\274\261\247\242<g\376\263<\033\352\273\274mj\016\274\nS\325\274\335M\355\274\036\n\006\275,\213\270\274%\251\357\273~o\026=\037\336\261\274S\020 \274\377\347\243;\3005\353<\217#\265;\014\330\273\2749\247\255\275\363g\253<\022\264\265\274&\304s\274*\376/\2756\207\256;\220\340\244\273\301\222\010\275\007\024\230\274\027\307\030=\205\253\342\274]\277\213\275\234\310\212;J\212\202<2\024g\274;b4\275\363:g<\013k/=*\'&<\356\023\257\274\310)D\274\242\215\023\274\306\230\306\274iq\227;\372\007\372<.S\271\273\220\234\240\274\357\312A\275\301t\033\275\220\010\001<\301\313\262\274CUy;\313^\336\274\354\235\224\273y\316Y\274\270\206\213\274\r\022#\275\316\221\252\273\022=X<\274\346Z\274T\323c\273\326\211\351;\301\272\357\274\352d\006;1\006\324\273\252\370\343\272?\226H<U\030\262<\375a\007\273\273\323\373:\0313\311;xe\265<\376,\273<\277\226\234<\351MO=\223fZ=w\220P\274l\261\205\274\240/\030\275\0142\024\274\305Y\203<\354Z\025\275\346X*\273F\263f\275\026uq\275/\267\361;\327\303O\274\335\020)\274qN\264;1sm\274r\2211=\363zD=9\304\352<\271\321\324<\206d\017;\373a\355;L\020\217<\231\371\320\274\004\000\006\274\'\206\354\273\260\301\252\271E\342\321<y\302)<\256\253\321<\214\267\024=\200n\013<\322\370\377\273\332\244\230<P\345r\274\232\006\323<\312^\243\274\207\027;\275E\371\n=AmD\275\201\017\204\274\207~O=W\235R\275\334\327\320;\226N\276<\234)\306;\242N\200\274\265\2665\275;f\244\27384\370<o\255\222\275\365\244\272\274+\036\307\274\023\0315<\210-\274<\006\276Y=\275m\264=\332\276\315\273\365\207\365\273\275u\243\274\264\001{\273\331\207\256=\017\300\337:4\345\031<\3326\360:$@\004<\030]\245<\317\244Z\275a\010\241\273HC|<;\356\251\274\341\320B=U\241\r\274\316\210\251;\030K\323\273\254m7<Q\267\347\274\333/=\274r\2179=:d\034\275\237\353\250<\177\2262:\201\037P=\337\372\214\273x2\223\273\374=\375\273\007\214\002<\t\252v\275\356\340\013\275\376*g\273\355\377C\274,oQ=\3026\212<4\216\217;q\352\207\273n`\017\274\312\252\305;F\266\215\275\003\261\223\274\241\344\316<\'\342\356\273\305\002\014\275\027\225w;w\304\270\274\304\276\004\274\301\372?=\357\2172\275\031\032\207=\204\336:\270\2676\367:s:\376;\013\254\315<\t\365\016\275\350\260P<{M\256\273E\352_\274DJ\254<\317\306Z\275\365\006\264;\024\365\225<\272\020-\272\377m\256;\227\317\327\2748\311h\274\264\360^\2743x\203\273\177\301\340;\321\312\013\275Y\2634\275\253\t\021\275C\3250\274P\253\001\272\"\373\363\272\246\257d\274\330\247\211<^\025\231:\3035\002\275\370\273\262<\234\224)\275\277_\356<\244=\322\274\207\306\r\275Ti\262\274`\234\211<H\274K\273\300\005\370<\225\305\337;u\351\206\274\0036\232;\020u\375;\344\264L<\255q\014=\334\236C\274\365lT\274C\2342\275\366\350\200\274[\273\253;6\245\353<\\)\267\274A}\226\274U\177\010\274[\237N\274\305\330u<{\233<<\022<\350:\336e\205\275\373c\
376\272\261\202\014\275\217\3349\274\330X\036<\204\367\264<(t\351\274\322\276\310\2748/\014\274\240\333\203;\336\222\323<\304\277 =\0041\223;,\177F\275\275\204\t\274<r@=\235\326U=\311s\203\2741\0315\274\216n\331\273\214j\032\274w\374\254\274\211,\276\274\353\014f<o8\265<\030\327\211<?\306\020=\243\177E=\2525B<\357(\204\274\243\246\305\274\242&\212<\226\016\361\273\331\316\007=\306\330\263\272\273\245\354\273\301G\312<\306\351\241:\'\233=\272\373\020\204<\377#7\274\330\345\005<\273\024\030<\302\225\245\274\325o\361<\0104<\275\031\035\217\275\353\272\003\275j.\260\274\302\014\230<\257\344><f\333\351<|V\202\272\2112\016\275l@\025<F\220h<\035<\037\273\345\021\216<wm\030\274\207\026\343\274\204\3767\274\317\032D\274\377\321\356<\226|\215:\241\005e;\361\351\234\274\241\361\327\272\302\200\220;\326q\036;\304\212\211;\214t`\274\260\236(\274\332\241\215\274\351+\375;\257\356\355<~t\343\273t\375\003\274\\8\214<\234\003l\27445\306\274\320\240%=j\034\220<\274\024H:jB_\271y\332\223\274\371\277\227<p\232\022<lL\345\274\"aP\275@\256X<J@\272:s$V=r!\366;m\276\222\274.\016I;\206b\214<\322\302A:\n4\210;\017\362\351;\341\002\343\274\023\244\272\274\360S\311<\307\224\360<e\203\025=sNL\275=jn=\027\r/\274R\010\376\273.\245\325<U\n\345\274?\352\327;\323\215\213<\367\315\334:\345y8\274\306\213F\275\010\224\031<\2235\265<\257h1<\332XF=t0\004\275\351\253I<#\236e=\274!\262<\277\204\302\274\240j-\274&wW;w}\236<\213\343\010\274\333\334A\275\270\334\010\275@\224\236<\201\251^\274\027t\236<\025\371,=l[K\274\263b\277;\317\272\304\273E\000I<e}\271\273\224\366\251<<z\022</Z\212\274w\201W<\005\010\266;\024y>\274\224\036\303\273J\367;\275\200\323z<\022\335\032<\022f\000=\367En\274F\275\001\275?L\r\274~`\221=\347\300~\273\363E\234<\253\265\030=\246\021\210\2748Mb\275\270\340\234<\233\215d\273\324\006\214<T\274(=7\367\266;\314J#\275X\2527\274\023\034\353\274\023U\352<\004\300\t\275\333\245\220<\n\217\016\275\366\321N<a\252\357<}u\244\274\351\244\376<\277!\213;\"\013\372:\371\353\207<\001]\224;>\374\240\273C\204x\274\233\231&\275\010([=>\304-\274\356\2142\275\257\374\264\274\363\320\302<\017r\351\274\322\356=\274O7V\2731\262\024:\030\241\2759\251\262\236\273h\347+\275\230\325:\275V\373\r\274\260N$=\003\377Z\274\311i\027\274\212\360\274<v\307\021:\2223\026=\023QD\275\373\014\r\275\\oS\274`$\301<\205\203\222<\214\334\t\275\003\273=<0<<<\211\232\240<\217\025!<f\364\006=\010\365\332<\244\026\025<\345\262\006\274\273kF\275k\271\364<\262~\276\274\026\335\214\274\027e\203<N\3670<\200hk6U\001\252<\213\376\224\273\024;\334\273\273rt\274*\033\256<\274\253=\275\360\326)\274\221\212\002<\272\303\336\274dc\222:\240\250p\275]\337\014={J\244<\336\350\314\274N\024\233;\207\237\315\274=\301f\275\241h\242<\315\240\244\274\373\361\222<\2672\322\274\016\314\225<\222\366\334<|\207U\275q\025B=\236\006\225<\257\227\354<\000\277\215\274\343\344B\274}\2570\275\234\004\246\274X\333g\275\'\222\306\274\222\300\214\273S\326\357<\235\213\026<=(\245\272\020\347\021\274\214I\211</Z\330\274Z\242\300\274\255E\357;9\317\203=\005\263\255<\351DQ=\320\256\373<\320\223t\275\246y.\274\230x\027\275\376\027\220\274\316H\014<\274,\234;\027\271_=d\344|<L\247\277\273Q\027\341\273\003\0335\274\025=\330<\244\326\260\274xQ$\273\242\014o<\304){\274\026\275\235<\244\362j\275\306\033o;\230\026\217\273\013\265\235\2753\207\r\275\255r\024<RR\277\272WnR=\200\276\343\271j\243}\275\034\021\331\274z\014+\275\3137W\274\330X\225\274\255\252\020\273E\027\031;\272\205\230==;\017\273\270{\245\274\032\r\322\273X\030=<\240<(\274\376\350-=s=\277\274}\025J;\034\320+\275D\364\276;\022C\326\274J\
370\214\274 \327\020\274l\026\244</sN\274J\3322\275\000\264\310\273J\260\010\275&\005\016=B`\333\273\202;.\274\247F\312\2741\246\304;\242C-<\'S\010\275\341\227x\273|nQ\275$v2=\223\320C<\355\231\351;G\271&=\'\316\034\2755G\005=\373\311\301\275\\\231\331\274\250t;\275\342\306)\275\242\250\262\274\335\001\177:tT,\275x\314\270\273\026 Z= \222\254\274\274\304\340<T\357\014\275$\346\310\273\375\352\244\274\027d\277<\277\357?\275\322\232\260\273BG\022=dx$\275\346\2620\275\330\0311=H\n\226\272\356\274\031\275\007\235\n>R\010\036\2741.\036<\247z\005<\217M8\274\334\242\247\274>1\002\2757l\211\274\257\363\r<\030\313p=\3078\306<\253!\177\275x\366\033\275\235\231A=m\033E\274\334\316\027=8\272\222\274c\226\333<\235m\366\274\237\324K<l\354\340\274\030E`\275\366}\025<\030\374H<\240\n#=q\311\224\274\212\207\307<V\263\310\274\360\321\023\274\367\024\325\274\371\210+=\365q/=\316\226\313\2749\220W\275o\3371\273\020k\362<\256\302L;\262\327\r;\206\367?=\231\304\365\274\241\265\032\274\220\226\376\274\322X\013\275\370f\200<%\373]\274\320\255\032\275\266\306\215\274\240\247\226\274H\245&\274\350\r\356\274\360\300\340:t2p<\363\027X\275?\374\3559f\300\022\275|\312\037\275g\274y\274c\303\354\271HTF=\316\323-=+\361X=\326\372s\275\234\200A=\351|9=\377Z\016\275Y\3220=\371\331e<\t\005\001=\365\346\353<\177\227\033<\232>X\273\335\370\351\274\226$\251\274\325\344M\275\016\3575<\341\331\225<<\346o;\263IG<\206s\213=[@\376;\022\032N\274o_\332<\374\026\235;\270\342D\275\377Y|<G\246\305\273M\372\244\274I-r\275\355\031\365;\257\215\202\274\204\213\206:1\203\267;\262\016\256<P\332#\274\177\315s\274\212.;;?0\267\274\243\227\006\274\3344\232<:\251\245<\313\257\375;\026\\\370\274j\332\276<\223\030\221\274\261\343\000\2757jW\275;U\317<\006!5\274\321$a\275\214\001|\274<\261\014\274\034f\255\274,\021f\274q\377\251<\333\224O\274\324F1;\254e\204\274\005h\027\274\207\320\353\274\346hs\274\237\273\373\273k\244\207<\016\222\001\273t`[=\005&\300;\'O\236<%\322_\274\240\356 =\224\3117\2753\353\206<\217y{<\274\247\202=\337#\254<\371\363\212=\032L\021\275\336\224\227<\371\323\324\273\032Y\007\275,\020@<OF\000=\310\377\313\273\334\010\010=\334\r@\272\340h\354;\327Ej<\231\241\325\273t\232\365;\353\270\202\274\237\017\001\275\372\236\226\274IH;=\321\207\016\274\225\260\017\275T6C\275\004\235 \275+x\023\275\206\225\365;\266a\266\274\247h\'\275\313\301V<\260EK=j\327\305\273 \001B\275T+7:s(\005\275]\370\255\273\215\n\257<c\020\232\274qj\306;L\t\232<)\201o\2749\215\001<\025\231|;>\235\324\274\253y\265<C\343\270\274\251dw\274\027\354\r;\201\251\202<&\326\243<\313I1<}\235u=>\027+<\005\027\330\274\356\261\006\275[\014\236;\264\267\236\275\n@\246\274#M\225:h2\026\275\014\274E\274\245\364\014\274\313\t\003\275\232\312L\275\014|\031=\337;\322\274t\365\364\274\004\360\213\274V\207\032\274\357\360\317<\007\037\340\273\312E\254\275K}\300\272p*\263<\210~\335<M\332\r<\312\333\271<\272\307\261\274\243\274\341;\247\'t\273C#\377<\266#\026\275\357st\274Tni=s\247\010\274\337\371\215=5/\327\274 
PO\275\362\274\014=\215e$<\207\275!=\3101\354;\347aY=\233\032\327<\362\250\033\275\264\360\002\274\207\0174<6n\221\274t)\367\273z\205)\275Q\354\215;F,\000;\224\206H\274}\241\343\274\232\365\361\274\234lr\274\334\037\240;\300\002\271\274\227\302&=\304.\317;vU\246\274*\nu=\377\260d=(9t\273\245w\364\2747Dg\2740(\307<\276\'\006\275@\234\324\274\323\373\016\273\237\233\010\275\203\020\224=\342\000+=\022\206=\275W\331\354:\0051\027=\320\324\037\275RW1<\250\177;\275<Dm=\306r\253\274]\3165;\'\236\321;\226\373\306<BC,\273\330E\005\274\300#y9\0371\227<\3661\205=\220\356U=:\034\002\273\206mw\274\276hm<\007\323\017\275]Y\002\274g\314\001\2758\344\375<\225\252\236<@\014y\274\343\200\275<\207\n\037\275\177)/\2748\032\235\273d\363\307;-?2<\007\n\226<&\0253;h\020\227\274\365\2601=\350\316S<\307PZ\273\3374\320\274\001x\362;\2715\215\273\275\217\276<u\356b\274jh\270\274\326\000\314<3Yb\275\202\353]<;\220\332;\351\242l;\277t0\275.\332-<1-\327\273\017/\207<.\274s=\324\217o\273\332\317&=\316\323_\273\034\225_;\314h\177;M<\200\275\253\022\306;\252\254\352;n\375\035=\262\230\244\273\302$*=>\006\224\274\323\303\311<\003V@<\311y\211\275\n\203-<\305D\203\274\375\274\306\273\270.\311<\376\000\024\274\025)\210\275\260sX=\207.\010<E\311\204\273\346\005\256\273,\036\221\274\242\341\004\2758z\317<\322J\205<\262<s\274\371)$\275\256\215\231\274,\331\224\273@;3<\275$<\275d\030L\273\302z\255<\215\311\361;Z\3105<\022fs\275D6\266<\022\370\302\274>\334\367\273\336\310\337\273V\224\371\273u_\253:\220W\005\2742\375\246<4\036\220\273\370K\275\274\240\360M\2736\351c\275+\2541<q\203#=q\215\"=\223U.=\2363\354<\207\346\310<\375\367\010\273\257\357\025<B3\000\275\rt\373\273\3746\350\273q\215\'=\245\274m<\367\333\326\273\337\022\206=~\334\360\274\223W\014\275\354\320\343\274A\206\022<\237\002\035\275Q\007\301\272B}x\275\016L\236\274\376\213;;\305\2406=\246\372\324\274\031\205(\275-O\355<L\245\343\274y\356\t\274q\204O\274I\350\202;\237\270i;\372\251 \275\212\351\342\273>WM\274/\334\030<\022\332\321<\327\375K\274y{\215\274\233\014\221\273)p<\274\257\317|<\347\0348;\313FD<q\026\276<o\223\013< 
\313\271\274\n\255\232\273\355/\';\217\355\266<~\274\266\273[/T<\361\366\303:Nn\362<O\004\022<\323\334\342\273\364\210-<5\030\375\274\321,\204\273$\344x<\321\023g\274D-*;\321b\230\275\033[v\274\360\203\224;\022\327\004=\213\224\033\273\017\243\270\274\252\016\355;\007\010\237\273\0058\025\273\377d\354;+\256\300<\"\353\356=\321\365Y\2752\354\227\2743\373k=0\313\277<\2704\2128\375wB\274\001\346\352<\231-\227\271l`8\274\3004\017\275\026v\236\273\273(1\275\364Q\261<kAK<\037\350\003\275\331\360\240\274\264\354u\275tgD<\222\376):\204q6;\375\001\201<Q^\240<\243\023x<qF\"<\302\202q\274\177\224\216<\267;\266\274\000\246\036=-0\035\274t\t\322;s|\355\272|J\242\274\',\241:@\313j\2746\256\204\275\014{b\274+}\221\273\265\006\034\274\000\324\235\274/.\234\274D\'\350<>_\315\2745\347+\275\\\202\035\275\355\013\213\273\221\355\333\273\003\311\030\274\203\261\n\275\273\344-\2739;\021;,j\014\275\027!H\273\232\212\030\2751\217_;\300\3369<\316\311\310\273im\244;}P\223<\336\225m<\366\205\006\274\261\226g<\341\231n<]W\240\273\335\3362=l\277\014\275\316]\212<\003\003\303:\347\231\250\274\0263Z\274Z\261\372:\237\rm\274=s$;\022\270\316;\241\370\272\274\215\327\213\273B\250\327\273\246/\376;6|\233<\221x\032=L`P=i\371\323:\n\373\304\272)\024_\274\250\237\n\2747#\340\274\274\225\004\275\343\210\'\275\315P\233\274-\"*\274Y\362\013\275\202\322$\274\267l\003=\236\201\252\274\363\214a\272L\367\016<B1\024<u\347\335\272\373\331\361\274\335\367\257<c\373\031=.\301\350\274!w\264<\353\314\277;3\213\t<\337\276\256<R\005\222;XV[\274Tu8\273\320\367\206;$3f\274o3\033<T\037B<\253|\022\274\256\033\2338$]\032=\376\371\274<\271\037w\274\377A\246\274_\361\315\274\027)H<[\275`=y\017\226\274\263@f<_\221\003\274\267\220\203\274\005j\360<\347\244\244\273\217\216\006\2741\340\000=I\255\367\274t\373v<\357\200\242<-\212\231\274o\262\306<\215.\002\275+\337\223\272\232\r\345;\231@\n=X\226\032\273og\254\274\030\233\243<\370B\222<\227\241#\275\251w\311<\034\\\206\275U\246\354<;5\244<\303w\264<\246\252\242<\006\316\006<t\352+=\224p\220\275\330\350p\274\027\306\201\273(\347#=\341\3110\275\037\344\304<K2\215;\364\265\370\274\006^\013\275v\035\303\274N\310\006\275\\\237\226\274\354\205U\274\204\321\322\274\\\332\013\275K\244\364\274\220\002\007\275\243\252K\274b\216\177\274r\rE\272o\306\254\272P\301\030=\243\203\337:Hte<\007\361i\274\305\264]=\275\'\223<\305\304\376\274SI\357<f\272\270;\031\036\254\273D\300\331:\267\256\207\274\0060\271<\303\357\222<$\273\342\274\264\361\276\274\037\341\214<\263\255\314\274\001\214~\273\220Ch\274\n\215\347\274-\366\006\274`2\224=\\\247G=\362m\223\274@\325\253<\307R\030=\227\000\017<\020\250\201\275\254\204\361\274\250\005V\274\217\331\255\273<\020\033=\005>\024\275\324\007\253\2751\203\275<$\341\331<\305\210\236=\314pj=\177\374\255;\374\212\342\274f<7<\350\207{\274\274\014\260\272b\031\226<AP\277;\326!\250\274\370\260.=c\250\374\273\262\0356\274\247\310\201\274\027\265\021<\323GN=\202\3361<p\277\027=\272\340\323\273O\tL\274=\360\211\275\2451\013\275pV\363\274\205\377\326<\360\352L<\235\036g\274\373\374z<\376\226\206<-6\014=M\377\201\274\322\264\253;S\2364=\026\247\316<J\022\361\274!%\035\275\031(.=9\032\001\274\016O6\273\217\375\270\274\017\031P\275l\352\375<\350Z<<\'\004\241\2744He\275\212\203\177\274:T\342\273\320\346#\274\311\032\332\273\324\262\023\274L\375\370\274E\361\200\273\030e\232\274p\342\270\274+\251\240\273hRu<\022\227\255<\355\0308\275y9\'=\025\244\323\274=\214\352<\242\023\337<\215R\327<\272!\350\274\270\260\240<\020d\207<\016t\306;oq\206\275h\300\367\273\275\037\276<\326\215\311;\364B\032\273\343\344#<\
310k\326\273\204*\331<\341\205\200<\006j9=o\326\025<\254:\371\273\377\270\324\274\024\037\363<\237\177\200\274\303\035\315<v#6\274r\t+\274\322\220-;HH\275<\010\271\n=0\257\236\274\225\201B\275\375\005*<\324h\310\272ed\222<\232\337w<\'X\343\274\\f>\275\365pW<\265\353\204<\266\031\260<\220\250\020<7 \264=\273+\243:\336D\211\274\033\021\035=\'K\275<\337\373\231;\237\376\032\274b0\3739\365\346\350<\250\260\342\274\030s$<\363\353\211<OV\247<\331T\262\274(\241\363;X)\232\274!\365\306\274<b\'\275\207H\354;\313yM9\001\331\215=\277T\216<\\\017\306<$\225\202<zr\274<i\235\235=\303\003\315;\0052\314<\303\252>\274\306\372\337<\216{\002\275*w)\275\347\252\\\274X(\237\272\370\321\315<\202]3\272R\263\203\274Z!\346<~$\220\274\026\274z\274&\200~\274\235\2635\274\3437\256<\222\211?\275\254\210\215\274n\370\034\274$\031\013=\333\335\255<+<\307<py)\275j\002\361;\013A;\274\tU\000\275J\205\t=\336\202C\275\251h\220\274\306p\033=\252\253\201<\375\031\000\275\026n\204<a\250|\274q\227\2209g\354\373<H\014\340<<N\\<\241\n\177\274\254\224@=\351\346\312<J\343D:{\272\216\275\177\034\032=W\2519\275\022n\202<\024\360\321\274\032\206\221\274\243\210\227<\266\267%\274\035\305\033<\315\021\331\273\016^\003=O\230\207:\305p\226<Vz\022=\372\220\323\273\347s\310\274\223b\001<9\3076;\346\243\257<\367\\\361\274\377R.=\267\225L=y\300\267<\270H\311\274\3756\t=b\210\262\274\213\274\240\274kg\003\274\257e\016=\365\n\033\275B\264\262;S\234\303\274\230H\2149\227\361\016\275\206k\213<\362\241E\2746\253\374\274\212\010\351;}\320\247;\277`\025=\355\315D\2727kb\274L\337e<\225N\213;\334|M\273\354)!=F>\372<\343\003\233;\262!`;Z\333>\274\372\351\024\274\034\302F=\220\310\177\274\036\035\205<\025\217G<\317>P;8<\325<\221\315\332;;\353\215\273y$\255=6\274\335<\272\227f\273Pf\323\2742\333\362\272\327\317\265\274\357\000\006\274\213b\267<\213\361\212;\241\363c\274\306\031-<X\336_\273&\203\375<n\304\273\274\212\361+<\203VU\274\2118\211\273v\262\223\274\335\277\377\274\036\246\277:<_\244\274\362\253\304\273S\224-;\240\216\007\274\200\212i\274\364\332\262\275g\0018<\004\227\254<P$\327\274\014\264\243\274i\017\320<:\332\347<\010<\006\275\203\355\260;\033uI\275\375\220\'=\324\200\202\274Lz\326\274\357_\346;\373\372\253\273\023\351w<\302o\242<\301\323\037=\013\034\336<F\203\212\274\t\214M<\025w6<c\260K=\036e\234<\365\"\207;\242\311\257<\276J\210<\007\227\325\274z\005\225\273\307\201\025\275\2465\200\274Xl\035=\245\272\326\273\3076\326;\034E\252\274~\3724\275\216=?\274\221=\255\274\305\023G=\326\306v;N\206$<\300\2109\273\023\037\345\274\240j\262=j\305\364<\311\340\252<d\r\220\274\203\274O\275\260\330\223<g\233(<=\363g\273\256)\023\272\314Mk;2\257\376\274\237\255l:\245\331\007\274p\301\n\275\226e\272<y\023\301\271\360/\216\273\204&\247\2746\n\003=b\325\320\272Bx\276<qk\373<(\300\025\275~G\215\274\n\254\033\275\003,\242;\237\032\250\274\226\352q\274\337\346\271\274\355U\223\275+\346)=\200f\223<\242\252\025<\217v\362\274\200b\350;D\257x\275<\355\363<\320l\000\275\023\220I\275\271&\031\274\367\204\365<\276\341\212:\177\273\234<\232\316)=b\260\342\274\351\331\363<\370\n\020\275DX\357<\'\320\326\272\275\020$\274\360\305\016;:4\317\274\275\302\304\273z\211s; 
[Omitted: octal-escaped binary blob (serialized floating-point tensor/weight data) belonging to a file added in this diff; the raw byte dump is not human-readable.]
\275t\3234=\211\001\355;\t\205q\273\030\006o=3\244\200<\320\211\335;uJ\212=\303=\007=V\311k;\013$Y\274m1\023=o\010\315<\310X\017\274\377\324\023\275b\204A<\207:\342;\317;G=k\301\224<\231\373\261\274E-\364;s\330\342\274\200\267#<e\270\035\275\\$\272:3\n\213;\0146\203\273\223\235\373<M\277\333\274\273m\272<xb\023\275k\221\367\274sBt<t\035:<S\356\032\274\352\037\224=[V\202<\014/\341;)\272\326<\003\230S\273iP\250\273\003\217\222\273\037=\214<1\3757\274\322D\205\274\301\000\327\273\237hG=Y\226L\274\215\224\223;\303\016G=\304\250W=\263H\240<\310\243U\2746\333\206\274\220\032\222\274\230G\223\274%\245\346<`DU<\234an=\222q%<\372\230\014\273{5~<\306\371\250\274C\373\266\274H?\361\274\366+W\274HA\013\274\035\025[\273Y\261\351\272\212\322\001\274j|Q=\022OR\274UAv\274\313\203m<\311\246\004\274\201\264\030\274\362\216\352\273\277\314\257<\0268\322;bg=<\221\360i\272\365\305\341\273\325\216\252<\337\177\257\274\2228x\273,{\212\275\220l\035\274\rh\331<\336\207i\271x^&\274\354#\207;\002Ka<~\016\335<\322z~\273\273\200m<\332\330K<\327\323(\275\351\322%=\034~C=\277\343\321:o\305\227<\374\277\342\274\346\261`=\372kY\274I\0253:\000\2026=\215\177\022\275\020y\352\274\331\0075;\365\366_\273\306\263\252\274\202A}<`\333\275:\2704E<-N\023\274N.\002=\177*+\2744\232J=\030\330\251<+\207=\275s\004\251<\361\254\362\274\005JL\274\350\343\"\274!\013\267;\343\232L\274\250\252,<\0346\231<\342\024\211\274\370V\334<\022uZ\274\357\304\350<+\324\341<\320h\221\273\316\334\233\273\247K\210<\271\300\033<,j\350\273/\260D=tG0\275\363)\237<\007T\275<\232\317\216\274\343\2573\274\037\'\305\274U\275\003\275\251\344\220<\222\370\017<\212lL<N\261\033\274=\314\212\274\01464\273\341\254=<\r\035\237\274\376\302l\274rn\345\274\201b?<\230\205@\273\243<\261<\014\361\003<\205\0303<l\330\216\271N<\326\273\264<\r=\272\361\233<\2404K<\341\320\006=\222\325\226\273\227\005\017\274\244X\223\274P\270/\274Q\037\217\274\013\346\t\274\371 \325;\177<\024=T\234\255<\034\232\216\273\337\221\344\274\267\2006=\014\356}\274\035\304\r\274\021\236R<\3550\363\273\313\325t\274\013\034\366\274\354\261\255\274\317\306<;\204\304\223\274Z\304\036\273b\337\n<\314\'\311\274%\201\265\273.\212\034\273-\326\013\275\030\213v;Q\202\007=\354\211\207;\r\342\202\273q\252E\274\352^\'\274\247&7<u\373\001\275\356\242\207\273W\223O=\302\342\327\274MV+\275\234;\373;\030\362\014\275\337\277O<0x!\275f\312\207\274\007B\365\274g(\265<\266$W\275\247\262\234\273\357\327\033\274\350l\304\274\352J\034\275c\213\255\274\031\213\352<JE\006=\002\334\300\272\241\257\361\273\254\211\277;\217\262\332\274\345\245\036\275SZ\003<\311\343\337\274\000\221\210\274Zj\033\274\324\013\215\274\362\327\321<Z\364\307<\264\253\006\274R\231%\275n\357\362;y\275\327;+\006\342<\301\005B<\016\350q<\316s\001\275z3\251\274\242Y\200\274\326\367\006<\365\256\037\274\345\325\r\275f\315\323\274\222\271\305\274n\231`\274\331N\247\274\304QO<\240{\320<\341\200\210\274\2147\336\274\277ek\275xu4\274Xg\317\274o\226\202\274\255\037\214<9\215\204\273\271%\030\274\326\220\327;\341I\005\275P\014\002=0\003\362<\035\215\212\275\315\311\277<_0\211=/T\212\275)B+\273<Q\007\275 
#\231<\323z\024=m\215\020<v\326T8Nf\325<\330\370\036=\362\316\003<|\357\'\273\343\036\023\274\300V\211\272\3756x\275\357\303\307<\213\275\000\274\352\"\240<\270A\377\273\205\336\273\274\320\013\035\275/R[=:\021\350\274\262y\255;\350\006M<\177\246}\2748$\024\275\014}*\274\361)\023\274]\r\214=\212\rY\275au\003=\352\210\204<\203S\214\274\215l8<\004\213\032<OW\300<\230>\200\274\001\324\032;E#\216<.\244\275;\371\245M\274\215\243N<\275)6\274E1O=\\\021\035<\202~\261\267C\340\360<D)\276<\216!`\273m:p\275\336\334+\274\025X\255<,\264\3308 \316\266\272Ik\323\273\204AZ\274\004\365v\274)X\217\274\347\014\227\275\325j.\275\227\314q\274I\210\037\275\251\020\030\275\375K\216\271\202\324\246;\n\214\272<i\335`\2753\327q\274\002\273\t\274\006\371\320\274\3652\\:\3106\006\272\360\262\242<\376\321\316\267\326\347\262\274\337\336;\274Y\356\323<\210\335\363\274Q\361h\274\\\265)\273\366k\023\274\362\016\340\274\264\363\261\273\014\374+\275\001\247\200<\271\016m:\nXe\274\247\333R\273\"\0253\274EN\037=\206\263\023<B\331\363<l\037\203<f 4\274$r\371<A\257\024\274k\000\307\274\020\343\213<\004y$\274\322\177(<\243\261?\274~\236u\275\370\226\206\275\324\030\350\274\214\203\021<\353\035\221\274r\221\250\272\304\007\370<\014*\264\273\337@h;r;y<x\274\250\273\353\010Z\274\2538\004\274\345D\315;\315\222\224;\325\3572\274d\225\213<\036\334\340\274_\317~\275{g\006\275\331:\370;\016\355D\275D,\332<\355\014\324\274\333\203\272<\020\256#\275\177\304\037\275\305V`\274p\234 \273\332\313\005\275\333\305|\272\3464\325:t\304\363\272\242b~=\317\322\024<,\247\255:\350\200\262\273\316\213\007\275T\001Y\274Z\315\027\2748\203\215=\245r\367:\213+\304<\232\177\215<\023\242\260<z\225\244\274_\014(\275C\\A=\254C\252:u:\302\274\343\247\024\275i\351\231\274\344[\220\273\247\347j=\311]\327:\225\350a\274\034@\207\274\357\226y\275\'\302\363\272<\005\207;\356\244!<\254\375};\n;s:\361\256\242\274\033\330 \273\270G\213;\362\371\363<\033\245d\273q\334#\274y\364g\274\240\351\206\274\221\245\347:iG~<\314\205\377\274\232y\005=xC\260<8D]<\356\2711\274I\322.<HH\007\275\224ik88%O\274\032\255<\275V1\366\273\327\345\252<\332\372\273\275\362\313\020=r\003\324\274\230IX\274\247\227H\274\001\025\216\274O\240\321<\344\222n=\254\266\201<%,\306<\177\036\002\275\025N,=X-\320<\004\302Z\275o\324\310<\351~\010=\373\217\201;\275\202\352<y\200\353;o,\247<\357\214\376\274f;\345\273s\277\342<\227\201+<J\r\214;~W:\274\334z<<\330\354\302\274\203r\324\273D\020U:\212[\276\274\206\333N\274q\203\027\275y:\n\274V8\342:\021F\205\273-3i\273\003\327\212<\211\006\372<zu\202<vU\233<w\333(\274m\320\350;\303\262\313\274m\305\020=\204\253\n=oX\230<\200\373.\274\326\265N\274\371\000\250\272\313\000\005\275\343\032;\272\272fE<\252\325\026\275Q\277A\271U^\323\274D\345\312<|s8<\005\375Z=\274\240\221<v\230\215<\024F\014=vA 
=\221\233\260\273C\366U=\241\030\327<ny\220<:\021\316\274F\010\n\275\261_\223<\320]\244<\232\250\225<\317\021\256;0QC<^AJ<\325b\357\274\265\372\250;O\010\032\275\210\330\'=\206t\316<\375\003\345;M\303\340<2\201\003<\014\377\345\274\317y\340<$A\002=\254<\234\274U\303\353<06H\274\325\023o\274S;n\274\016n\230\274\334z\2319rq\t\275]\276\271\2740q\031=\224\035\006<\025]9\275*\262\256\273\244K\350<\313\016\032\275v\217\211\274{\364>\273\300\372\322;.\306\2269\024\334\212\2749\'\232\274\211\352E\274r]\234\273\317\245\265<\203\327\017\274\\7\022=D\264\007<\245\335\010=\264X\361\274\214,\355;\264j\013<?S\013\275\225i|<\256\241\304\274\370c\020<\230\255z<Z\370\252:6\3349;\232\217\270<g\2311\275i\375\327\274\335g\300\273\302\207\321\274\215\027\023\275zT\274\273\2673\031\274\372\253\211;\255u\325\274\t\220\313\274MY\243\274\212\235$<?\304\r\274\355\351u=\226\361@\274\241Ju\274-\277*=\276,\235<\306\337\235\275Z\310|<\250\261\016=3U\310<D8\317\274I\207==\276\223\372<\343\230S\273\216\004\003\275j\304\316\274\313\373\323\274\315\222\026\274D\225.=\211\336H\2732;6<u\232\320;\302\247i\273\365\351n<]\377\374\274\224\236\340;s\362K=\377s\024\275\034\377\271\274R\352\251\273\276\223J\2738\374\000=\317\377\024\274\330\255~\274Vn!\275\252hZ\275\002\355*\274B\220\257\274fBg<\3350\026;\262V\312<\376\307\212\274W\317\235\274v\244)\274\031+\035<\013\370\235\274\261\201I\274\2722\035;k\0268;\256A\014\275\266\230\353\273\262,7\273R\021\310<\311\216\025\274\353\nn\273\225\252\244\275p\261\220<\260\357\355\274a\312\026=,^J\273D\363\244\274\240\222\037=\366\377!=\263\260\357\2748\356\327<\r\000R<e)\304<\322\024\321\274E\t\t<\215\214k\273,\003\367<\002\254\363\274\340\226~\272\303\235\222<t\344\252\273u\001~\274\022\371e\275\370\226X<\302r\300\273\3114\213;\214\333\266<pk\236\274\314\031\214\274\010L\266<\304\217\253\274T\361\211<\263\303\016\275,\231\016;\260\005$\275\n\304G\275Zt\304<\177!\346;{@\210<\354}#\275\357\211\377<\357\325\341:\341\211\226\2734\206/\274EQ\313\273\177&\025;@$\367;\301\374A<A\210o\272\356\036z\274\2653\376\273\030\351\332;\321\r\331<\036\243,\275\272\212:<\256\244*=\004wQ\274\304\323\037\274\\O\242\274\361\217\213\274\311)9\274%m\254\274zM\341;\361\027%\274\214\037\264\273\213\330\320\273*c\033\275k\211\221;[9|=\230\210\000=}\020U\274X\225_=\373\020\240\274\362\310\210<\026\025\261\274\371E\036\274\300\202\235\274y\223\204;\376D\250:r\225\025\275\3701\222\275\"H\347\274\014\030\210\275\326\037\023\275oI\221\274\3225\355<;fY;=\360\272:Y\026\357;~g\006\272\336\324x;O\247\005\275\020q\240\273\376[\226\273w%\262\274\004\273V<\232\261P<!wu<\366\243\\;\361$\017\274\334\234V\2745\207\276:-\177\374<0\235\274\273Ez\006<\272m\241\274Pv\370\273\0046\237\274\250\037\264\273r\261\202\2759\'*\274\256I\3339\245\317V<\037\004\n=\317`\007\275;\310\225\2746u\266\274;\204<<\265\344\344;\275\326\242<\3058k<\216\254 \275T\033\201;3\363\362\2746\2075;\"\207\036;4B\253\274\361\327z\273\354\316\352\273#r\265<\347\2508\273b\241\245;\301\273\210\274\254\017\025\275`2\257\274>\340\200<7R#=\2560m<\2352R\275[7\022\275\237\017\350;;\230\311\274\0362 
=]\201\004\275\301m\027=\311\277\234\273!\256U<-\350\000=\013%L\275Ih:<\310\222\237\274\242\366\220;\320\201\360<\372\372-=\306E\'<0D9=\204S}\274n[5=o\253\024\274D\326\002;\360\234\243;\335\335\221<\241\257\252<\312#h\272\023\206\020\274qn\370\274\013\367\251<\215\371u<\362\320\214\274\024\261\361\274\376\222\263\274\262\225\321:\"o\001\275\365,\236=h\343\245\274\327\237\026\275\366s\222<\254{\363\274J~\364<M+\241\275il\314\274\336Q-\274\355\203\020<\215\205\037;\303\001\356\274\371\245\365;aL\022;\234s\375<\211N\364;l\273\222\273lD^\274\204D\004\275\004<\323\273\307r\220\2758u\235\274\224\276\t=\305\375J\275\372v\327\273;*\206\273\323\217\223\274\270c\306<\355y\020=V\250\222\273\222T\020\272O\317\257\275gD0\274\200a\000\275h\320\332\274\000\307\271<\313\013C:\204\263\256<\330\255\373\273v\025\347\273<U\004=v\246\240<\327\352\013\274\263 U\273\206\230]\275tRC\274\261\3523=\223\003\177=V\243)<k1L=`\262\224<\254\225(\274\021![\273\243\255\213\274\330b\337<\370\021\231<\363\003\302\274\334\031\000=:\357\264;u\0216\273\026\\U<\340Q\316\274\312\226\236\273\374N@\274y\323\236=\016V*\275\005\343\030\274\375\310\241\273\026c\t\275_\016|=\\\207\373;\221\277\004\275w4\265\274\023\206C\274\025d-\275\316\232\355\274\347\341\265=\247\336\027\274\255\020\001\275m\026\030<\213\351w;]n\206\2740\"\002\275\'\322\272:G\006V\275\266\005\016<j\t*\2752:\327\274\373\217:\275\374\227\032=hu\036\275\303\340C\274\307g\010<u\025\267\274\203\316\226\274\357\314D\275\251\247/\273\323\301\017<\254\332O\272\375\374\356<\225\375k;y\010@\275\207-(\273zI~\272^\006\336<]r\270\274v\2225=*\256\022=u\037\213\274\035S\275\273\354\216/\275Ue\006\274\037\247\335\272\301\\\001\275\311\211(<\3119\342;\347\323K\274\274\262f\275\000D\035;(\345\371;\240M\211\275a\253j<ry|:\366Z\247<,\214\223\274\020a\";\231\024S<W\3240\275\325\260p\274&\010M<*I\335;:\346\037\2745\326\361\274\211Z\276\2726n_\275\325\336\017<\024\337>\275\376\341k<\317\342\376;\322\377o<l\343\035\274\306\330\374\273k\317Q\275|\255\302\274\245LZ\274\335\2502\274\271\313Q;\215\205\261\273_\354\006=\277\347\202=\321(L<]:\272<0\216|<\231\331~;Sk6\273\021\210\303\274\254j\201<!\t\023<\251\307\266\274J_\274\273y8\003\274\232 \246<\002\273Z<\323\226\304\274K\023\244;\236\221-=k\373\323<\022V7;\357t3;e[\034;\033B\301\274\020\257\351\274N/\367<4\315V=\220G\017\275\260\007\223<\301ba<k\257b</1\261<\262\217\007\274\322\335\343<>\375\303\273s\311G\274\324C\331;\210KQ\273\335\3447\273\356\243J<33\247\274%d\244<tG\236<go\r=\304DE\275J.L\273\300\213\256;\222\300\313<\360\261\210<\256\243\004=\344\332\366\272Z\212\304\274]L\340\273\257}\307\272\256\214\t\275\212\337\033<M\337Q\274\234\304\212\274\317\221\242<\320\340\013\275)\357\213\274\200\377\210\274t\253J<\333P\373\273\312\225\010\274\344\223;=\350\033\354\274,\372\036\274\271\327\213<\362A7;qu\352\274\237F\210<\344;\202<-\332\036=\217s\205=k\275S\274\315\234\210\274B_a<\302\017\347\274\216\002A\273\316\216\323<VxZ9P\320\271\274-\243\006=\264\364n\275\324Q\256\274\262|\232\274ha\005\2748\255\006\272\270\363\320\273\354\224{<4 
\274\273.\256k\274\251.\223\274b\220\r\275\3656<\274n\3574\273d=\231<d\014K<\207\264\360<\322Y\317\274\223\301L\275\336\3138;*\266\035\274L\220\257\274\231\2014\267\322\337\037\274+\217\371\2732>b\275\222\\j<\344fk\273\217\355]\274\260\215\223\272|9\342<\021\231\352;\331<\231\274Q\324\212<k\347P\275DeE\272\323;X\272Z\266I\275\321\231!<\256u\244;t\300\252\274\210X/\275z#\204\274/\363{:\273\td\274\2328\333;\004X%\272\000m\242<\246\350\216\274\332\345\014\275\355=A\274\363,\024\274L\241\253<p\223/\274W\312\026>s\366\360:\350j4\274\217\036/<uz8\274\217\231\323=\200V\240<\272\336{=\305\2472\274\031\275\017\275\260\305\372\271q}\314;\025\r\266<\242mJ=\177V\245<\302m#;\200\006\367;$\2265;\216\276\024=\323\024\021\2754\216\257<\344\370\t=\nq:<\216\270\004\275\214w\257\274\\^[<4O]<\271 \234\273x\035{<\320\010\356\272\310\336\347;y\307\207<\027\346\205=\205\0337\274\253_<<\276\026\375\274\321\n\005\273b\240\006\275.\022\022\275\226D\213\273_\021E; D.=[\344\202;Hq{=\033@\270<\265\335\216<\366\334\021\274\265`v\274\177d\212=\260FB\274\325\000#\275\020y[<\227z\242\274-\370_\274\314\324\262\272.\246\320\273\254*1=IP\334<a\361%\274\254B\211<K\243\321\274\352\375\240<\256}\026<\225\330\330;6\032\206<1j\333<\257F\034<}\236S\272\312\030\005\274*n\353:\253\233\266:A\357\277<\344H(;4\212\342\274\024\026\372;\260\377\n=\347\256c\274\020n@=\313\310+\273S\031I\273\222\204\275\274\337y\270<\2328O\273\r[\027<\233\356\214\274\215\035\351\273V8\310\274\233\355\323\274\350\207c<\331E\247\274UCq\273*Io\273\363\241\363;\031c\007\275\252\031}\272B\2667\2745\032H\274\357l\014\275j\027\256\274J\"C\274\3201+=\3302\257\274QZ,\275\360\005!<\323\307\227<\037z\204<e ?=\020x\207\274\213\271\"\273\266A\0349:2\317\273\320\211\023=e\306r=\303OI\274\340\256\004\275\261\261\r=\343\310\231\274\021\261\261<\3651\213\272\264{\361\273\242\027\024\275\243Xo<\336\264@\275u,C<FE\"\273\241\216c:\344\t[\2748\256\352<\340\334-\275F\026\222<l\\\357;!\352\307\274\003\027\367\274\022\\\357\274\260>b\274\035\261\217\272\224\261-\274\\==<\332Uu<\201\275\372;\206\312\307<{\307\037\273\354\227\246;\024\221l\274\302\272\214=\027\035\000\275\223\333:<\376iB\275\367\256\213;+\337-\275)]\263\273P\241\217\274(\260\313<\240\256\337\271T#\275\273\354\030\200<n\030\032=\327\225V<\273e\336\274\335\027\032;rZx;\312y\312\274\250\363\255<%\3532\273\262\002`\274\2458$\273g\353,\274\202\310\003<\362a\230<]$\345;?\006\"\275\307\304\336<\251\nd\275\01694;\204\032\266<\032\306Y\272\340\212\\\274\346Dr\274\244$\360\274SF\007\273\2777\355\273>-9<g\335f<\2040\276\273\030\010%=\275o\260;\215\277\362;\367 Y\275a\021\372\271\336 
\001=q@l\274\274\022%<g\343Z\275\001\364\232\274\'G\233\274\302\341\231\274\334\033\335\274\346\315N\275$~\300<\177j\2119\315\026\250\273\217\366\203\274\211\254\246:8kW=P\244\245\274\"\200\234:!H\232\274&\370W\274\263wc<\312\002\225\274\353S\'\274\2751\231<\307\365\223<M\366\020<U\203E\274[\316\336\274)\253\373<\027t7\274c\307\346\274\375\376\221<\202\006\'\273\240\320_\274\025\245\034\274/\233C=F\366\330;d\334\245\273\330p\224\2733\004\016=U\022\263:NQ\000\2742$N<d\323\276<\306\267\344;\370Jv<\241w%;\2355\262\274\373v7\274\\\354n:\215\371\364:\301i\373<\250\230)\2756{2\274jF\000=/**=\235T7\274)\016S\273\333j\317\273\223\201\003\274e~\022\275\253\263\310;\034^\3248\244\2270=\246\013\234:\225\364B=\247J\013\270\036\024\351\273fE\357<\022\3742\2754\314\231;>Y\214=\005\322\006\275\362\264>=\274\225\t=n\020\261<c\016\017\273RX\271\274\374v\026\275Q\234\237<y\365C<=Q4\275\3609^<\233\340\177=\030j\022=Pw\260\271;\216\200<%h\311<C)\017\274X\214\207\275\217\347\032\274<1M\275;FB\274\251rJ\274\r\310\010\274\271I2\274\204H\037\275\034\217\357\274Q\350\217<I,\223\274\177\255\262<\033\323\233\274?\306\200\274\177\252\250\273\353-\272<\253]\364\274\220\006x\273i\037\303\275\2364\327\273A\276\001\273\224 \277<\021\n\207\273\'\264S=+\272\267\274\314\346\341\274\nP\313< \005:<\354\2666<\031z\375\274\325i<<nJ\r\275\235\200O\274\350\271\022\275[\007\274<N\302\325<\202\017\355\274/b\032\274\204\373\t\275\356\307\016\274J\315\372\274\250x,=\202\336K\274\317\021\307\2743\276\217<f4^\272)F\\=\226\022{;\005\2004\274\2239<\275m\016X\275#X\275\273V\347\343;\271\241\333:,\206\370\274s\276\"\273\261\202\026=w\233\275\274\237QQ\274\337\035\250\273\272XL\271q\352\257;R\252\215;[\364\003<z\277\375;Q\260\205<F\231\274;<\033\n\275\300\2436=d\225=\274\230\364\250\272\263\376#=9\2479\273\277X\"<\250\337k\273\334\t\345\274\276^S<\t\224p=\216\3348\275e\254\177<\252\376\272\274\000Bu\274\250y\214<\277\216g\273G;\244<\356\354\216=T\013\273;\313+\226=\234\267\200\272\010b\275<\337\234\253\274\375]\224< 
\206\323\274\363\334\022<\373t\016\273^=\271\273\024\301\213;\t\300j\274M\271\243\273\273\023\233\271\244\030\245=\222\202j;J\250.=\324U$<\220\225g\273\210\246#\273\020~\014=e\367\010\273\003\377G=\303J)\274C(\321;\325\371/\275\271\266\351\274t\014\231;c\222P<\337\247:<v\313\033=\"@\210\274\241\027\222<2\243i<\220\252\376;\372\255\030\275c\200\251;A\235\316;)\006\245\274\3539\300\2740Q,=$\366\032\274s\316\214\274\254\221\t<\241+\002\275\341\340\352\274\351r4\275\004\361\371;\311/D\275.e)<H\014H9\3219I\274J\016$<\022\022#\275\261h4<\014\3655<\257\n\340<\323\344\330<#\376;\273\347B\337\2747\265\003=\027\305.\275\346\336r\274\253g\006=M\331/\275J\000\307\274$\370M;\003k\030\274\277\246\277<\237JV:g[H<uRE<\252\312\024\273\006v\260\274G\254\317<nx\321\274\354\021\332;\202\234\037=\243\336\225<\337\034\323\274\304f\010=X\300\313<\351\261\223\274\253\312\264\275\334\235\256<n\200\254=\204\017\302<\030\255\204<\2361\n<u\342\202\275\317\374\224=\001\262\347\272\266\333(=\003\271i\274\230\374<\274\220\032j\274\037=\230\274\371~\017\275lc\'\275\247\nY\272\230\202\026=\n%\022\272\215Z\352\273o\366]<\274\277~\274y\006N9Y\t\214\274\254\354\\\274\303\032\377\274S\343\t<\336M\342<n=\352\273\277\256\032\274>\215@<\004=\247\274\260\">;\324\267A\275\2364\314\274D\233Z\274\356\027s\274\237%m\2759ml\2750\226\211\274\233\304\031\274w\3036\274#\244\013:w\026\251\273o;.=I\206j\274A\210\323<\320{\213<\007\322W<pL@=\364\342\022=\267m\304;\"TG\275I\216\201;s\030+;\'\006\007=Vs1\275\034i\315\273\313\327\334<h\310h<\353\007\003=\372\220\312<>q\033\275yS\317<\275\032\214\274\355\356\201<\205o\213\274\214\005\243;\252\001\231\272\027q\325;A)\365<\035K%=\227P\025=\270\212\365<\200q\r=o\236d=\322\366\314<h\034G<P\275f<\373\314j\2746SR=F\321\223\273\240Pr\274\330\002\201;\216\217\235<G\006\212\274\315\367\020\273\253\305\335\274J1\326\272Bi\252;\367\235n\275+21\275\030\333p\2733\370\206\273qA\370\274=D\237\274\234D\213\273W7\372\271\350\3768\275d7\014\275\234\261\354\273\344\221\212\274x%{=\327\t\220;\257\217\002\274{{\327\274N*\226;\307\003\304<\360x\312<Q\263.;\234\364\273:\021A\242<C \374<<\332\017=\353\340#\274C\354u<\006\001\327\272\374a\355<\210\341S<\026%\252\274{\232\245<\375\267\333;\326\326\352<\375Y\027<\272\264r\274\354\307*\274\336\264\315\273\025%H\274\371\267\026\275\035\302l\273\333\340\321\274\327\026\256<;We\273\200\231\205\273^\274\302\274\320\253c;8i\031\273\201P\250<\360\222i\274\311ex=\271\330\340\273\013\305\021\275\343\376\332\273\323\032\242<\204;?;\257\247\013<c\316\216;\256\017\263\274\325\014\';\024\277H;x\245\303\274\203X\206\274\374\207!\273\350\r\234\274\367+\002=#q\343\274J\325\311\2746\305\202<&\352\016\275\210[H<\355\313\222\274g?\344\273\306{\023:\205\252A<\245\261\304\274\211{b\275\265\243\215=k)\266<\000\235\343\273\372V\211\274\242\361,\273\263\024p<\021#+\274\252\010\'\274\314\350\223<\241d\341\272\321T\254<\200\220\330\274\000\036\020\275K\226\315\274\320\345!\275\275\360*\274 T\214\273\014D>\275\306ev\274Q\014\026\274k9\205\2742\250\326\274>\252\000=;]w<\216\223\235<\025\352\216<V/!<\222\252\242;.R*;\2070\002=\206\327Y\274\247L\026\275\304\234\211\272\316\334\t\275&\271\372; }G<\205\237\016\2747\354\233<\254\r\304<\374W\323\272\360\037\204<;\251\014\272Q\242\245\274\272F\355\274\225\312\217<\245\316k<\275#\210\274\305+\311<\360\370\227\274\"\271#;\031{\250<\340(\373<~m\326<\356\2753\272.f\013\274\2621 
\272\361\013\244<\201\2274;\034\307l\274=U\025\274\241\317\342<SK\246<l\245\265\274#v\230=\217\320\020=\036\343\367;\367\2415\274\032b\022=\346*\343<\221\r\201\274@A\271\274\344\021\327<\231\214\231<\266qV\274\r\013\272\274\312\252\255\273\007\010)\274\242\320\206\273i\347\022;\301YY<\337\217\222;\026\200\214\274e2\013=\357\261\214;:\325\n=\220\263\r\274@\010\273<\367,\331<y\342\231;\246\256\3579(\332#=\002\234\037=\002\2621;I\330\033<\261|\250<\354\033\027;\333\327_<\023\237\030<X<s<\302\331\204<\004 \331\273f\346\331<!\347\263<]\025u<AP\321\273`\350\031\272b\304\237\273\242\206\211\275\234\230\326;`^d\274W=\020=7\361\236<mE\355\271Q3\336<\267U\364<2\001\213\274{\212\264;\272\360\321\273\371U\343\272\252]\010\274\302(p\273\314\372\031<\341\236M:6\263\307\274\227C\016\274x\214U<\264^\275\2746\217%<a1Z=$\032\001;Oq]=N{B:\231g\277\274\033\302\273\273\033}\271<\3479D\274\231\016|\274\235I\341\273\002\276\007\274\0176\234\274\026\\\216<\025H\014=\246\372\000<\225\347+<\231a \274\303\243\027\273V3\276<\364E\230\273\350\010\321\273\260N\217<\322\210\201\274bF\212\274\rp\026\273\210\363m\274`\267\303\274ls\201<\341\273\224<\'\201\n\275\232\342\013\275\222\222=\275\370*w<\3041\316<?-\002\2743[\010:+\211\320\274\353\204S:\261\354Q<9+\264\274\330\035\026\275F\206\007\2759\024p\274\331\316\034<\220\224\230\273\035D@\274Q\312V=\336\\:=c\234\313\273?T\237<\221\202\316\272\220(\362\272L4W=Z\233d\275\005\0343\274\007^\257;u;(\275\200C\007\275\271s\372\271\007-#<\257\363\022\273\332pG\275\350\tT\275\267R2<\206eb\273\030>\206<dL4<\211\036D\274^(\223\274\351\225\2048c\345A<\205\332\234\273\005\"\357;Z\3678;e\272\251;\225\373;=L\307*<\013c\031<\302s\001\275\203\334e\273\223\245C\274z\207d\274\2501\275\274\345fU:WZ\274;\n`\t\275\215\246\342\274\342D\307<\243K\206\274H\006`<\224K\325\274\343*\367< i\261\274#R\020\274\210e\301<\363\177?=s\004,\273\376f\340\274m6\302\2738\020O\274\345|\364\273\354\322,\274\006\001\372\273\005\353%\275E,\020=\303\341r=\272\217x;0a&\275\202\304\354;\002\035\216\274\377\203Q\275<\037U\274\270\342\334\273g\007\372<e\356D\274\245\020\010\274)\373G\275\307\0338<#\272P\274\271\206\021\274\367\030B<\335r\327\274E\372\200\273m\213\017<L\220\001<\332\310\007=\030\005\317\274aj\035=\2630B\273%}\323\273fxc;\221\354n\275\276\233\356\274\276=\247\274x\331(\2755i\370;5\317\034\274\207w4\274\254\307\n\274F_\267\274\304\230\237\273\251)4<](\035<\254\334x=\240\307C\274\344\245\306\274\211\210r\274z\270\242<\364\216u\273\223\036\004=M\356\366;\262\335M\275\251\335\341\274\362\307\027=qi\206=\343>\322\274R\323\263;v\2352\275\206\236m\2758\030c=\276\367\263<\226\304\217\2734\334\244\274\346\325\034\275s\3375\274a\357>=\206\270\212<v\361\323<\243\212\230\274Z\246\205;d@\2408\004\317\212\274\204\350\2318{\313\321\271\365\326\313\274j\363\254;X\207\177<\261\215\356<\027{\006=K\330I\273AJ\353\274B\206\372<\313*5\274\336\225\216<\312\345\245\274\014zY<\232H\223\274\360\006\252;1\230\025\273\334\t \275\205\207w\274\242T\217<\243\002\241=\244\214\215\273n]$\275\037\3744=\333\364\306;\\\376q\274Q\\9\2756\370\200\274\343;a<\3150\202;\016-\024\274\002%\273\2757\201\023=En\261\274L~\036\275\327\033\306\273d\224\205<\027b\222\274\334`\025=3\202\325<_\345*\275$\006\001;\352a\272<\002\'\223<G\036\260<.\206-\274\357%\205\274\t\324\317\274\267?\217\274\3027\270\274\323\314\320\273\301\343\006<c2\022=\315\033c\275\022\375*==/\256\274\235\027g\274Y\360\036<P \365;\251\205\014\275$1\267\274\347\367\226\2747\214)\275I\211\255;\nO\"\275\306V\276\274\033l\327<\010\007\261\2747\223)\274\306\250 
=\267\236\207;\330\037\354;\025\3321<V\323\257\274kp\003\274\000\373\200<Z\260\"<\031\223\325;\234\344\337;?x\251\274\375w\037=\342\232\260<\370\177\006;t\322\310\274_\177\310<\271\202\t<3\267m\274\021\365\307<\265\303\005\275q\357\024\273\024!\321<\020D\224<\233\3669=dQ\261\270VgL<\240\3359\273!e\357<4\333f\274\233`F\275\\/\217\274\324\210o\274E\316(<S\000\302;\3378\010\274\025`%=\033\334\211<\271\033f\273\330\336\006\274O\266\366\274I\017\256\274\025\376\340<\361]\341<\rSD\274\335\204@<|\204 \274\nBl;\353qb<\242\277F\272\273\3523<\021\024\201\275?K+\274F\2201=\256n\021\273\235<\360\274!2$;\026\224\372\272j \331<\342\264\003=\373\365C=N\036\237\274cX/\275$4\010;\006]\321;\242\350(\275}J\"\273\377I\027\275_&\337\274\r\312v\274\371\265\000\273Th\214<\265\324H<N\024\245\274\"\t\310\272\273(\266;Y\350\251;\037\3640;\211?\372<q\014\247<\264\354\273=Vt\200<\033\t,\275\322\371\261\274~\371@\273\252-w\274k\261\235:\233o4\275k\301\335\274\363,\266\274\316\301\243\274\320p\222=\352\245l\274\244\323\025:\341+\037=9\240R\272\356\032\247;\255\177\251<[\024q\273m\014\245\273\017h)\274\216\036\260\274\331\346\261<\036\273\277\273\2314H\274\247\266\327\274\263S5<\341\023\311<|\377\360\274G\020\320\274\247\262\017\275o\270\342\274\352\023\321:\230\007K\274\261#\001=\274!\205\274\243\037\357\274\204\250\276<$D\324\274\210\211P<\262\314r\274\020\256\363\273G\207O;\002\322\234<\353\254`\273v\002\236\2748\3647\274U\244\355\274\243\224\'<B\207\332\273Fls;\2736\006\273%\022^<\242\300\\\274\313\263\240\273\241\345I\274y\371\266\274{1\220\274\370\026\330\274!z\323\272\341\255\217<O\201P=\254HA\274\206\336\267\274\301S!=,\222\361\274]SV\274\330/\206\273j\257\363\274\"\333\010<$\245;;\325\352\233<\203\022\251<\006\005:\274Cm-<\222\374\342<\245\271\375;\241\233\353\273\337\323t\274K\032\212\274\t\030V\273O\314\005<h\277\254\274\320\241V;\311!J\274s\307\254\274\313\320\220<[bQ\274\245g\366\274\267\376[<(\203P\273\251\347Z\275\252\262\264\273\026\362\035\275gi\335;\312W,\275\t\332\227;9\341U\275FD\353<\004\203S\275\002\361\335\274~\'C\275Q\241\222\273\227\013\261\274K\221\271\274zFL=EU\202</f.=\222\315\212\274*Iq\274\253\3063\275\310\017S\274k\031P\274\323\212\333\274\027\005`\274-\310W\274gP/\273\013\205\322<\370J\202\272\006H\032<\247\216\313\274\2578\225;\005\027\233;\267\3427\273\3215\302\274\356\354\315\272\003\254\203\274A\'3=j$\267\275\213\322\325\2740/\363\274B\345m;Q\033K\274\320B\034\275\275L\314\274\327g@\275\354&\014\274\360\215\212<\363+\007\275^4\212\273\371U\r\275Kl\257<\271x\354\274E\267\016\2751N\261<\325!\231\274\t\320]\274Qt\214<t\317\245\274\255\273\021=\276\000\033<3\235\202\275\245\363f=o\030U=\214\214\263\274\315\254\374\273\01027\275\334\321\034<\3664\275;\266\240p\274\207a\001\274\250\330\355<S\362R=4\'\347;\256\334\257\274\242|\271\273\352\226\331<b\216\000\275\255\364\035;\004\303\254\274\003\252q<\177\021\225<$\310,<\316L\"\274\020R\217<\224k\271<e\376\371\274\nq\333:v\242\356\273v0\230\275\030\356\333<\335\372\254\274bh\243\274U\235J\275\227#\376<D=\260<\251\233x\274\347\353\232<A\212*\275%\251\224;X\224?\274\2235\271<\351\2650\274g\311u\274\326\tg\274h\365S=W\024\013\275\027l\007=\353\222k<>\017\221\274UH\306\273\245\352p<\032\024\226<\004\277\274;gxT\275\267%$<\204\031\032<\251\"u<#\335v\273\006WA<;q>\275\027\026\337\273\217\263\031\275\337\307+\275ogl:\336\0203;\313\232\367;}\r\036\274-\211\217\273\271\245u=\021\262c\275*\372\344<\027\301\351\273SH\207\274\003\356\247\273)\252\253\272\033\361\323;=[,\274\240\337\246\274te\264<\226a\255<a;\277\273\225\213\216\2748\345\246\274\201\t\367\27
3\205Q<=,{\001\274}\001\007\275\343G\331:\031\246\333\274\031Tg\274\317[\310<\333\224b\274\356\223\375<q;\312;\235\201\303<\373\360\"<\3666\201\274\225\305\005\274\035\016\005\275x\021\025\273\020\212\250\273\321\326\311\274\252\275\t=7\215\342\274\035\373\244<\270Gy\275\016\366Z\274\371G\252;\272\371\r\275\022\262\214\273\275\010!=\215\037!;\260iX\274\203\326\237\272.>u\273\377CC\274\320\216\273:\002\315\235<\322\322\200\274\261\256n<\321\273\032=\223\322\'\275\371\326\242\274\000\214\270\273BI\246<\325\256\235\274\221\2008\274ep\266\274\022\005\013=;\357\276\273\231s\322\274\254\360\236:\0336\361<@\227\t\275\\\330\031\274!\347\334;\265L\017\275\212~\016=\315%L\275\017s\317<\024\307#<\363\364\317\274\374J0=\362\271\002\275\333\n\334<{\235o=6\273%\274F\231(<\261\345\037=\206\232\250\274N(\226\274>\254\027=\0060\n\275TM)\2745\006\031\275{\356\023\275\371K&;\"\220\377:\205U\240\272o?\320\273\212\321\315\274%!\203;\307*\311\274\211\323\242\273\315\307{\273\260;R<H\200\006<\265\272\274;\321]\001\275\007]\317<\217\300\016=-6a<eZ4\275\243\013\226<_\250\020\275\366,@<v\035;\272L`\027\275\230\332\033\274\333\223\331<\204}\200< \001.\275\361\213A\273\206\367\245\274\240Pb<\331\207\027\274\013\366\277\274\361*\007\275u\203x<\363\217\\\275\215~\337<*+\342:\247\037X\274\370,\242\274e\377\267\274!!W<\217\030c=k\335\306\273a\020\016\272v{\346\274\377\370 \273%\272\010=\201\027w\275@Wi=\214\002-=\021c}<\360\214\037\273+^\263;KN\035=\3527\\\274\231\027E=}4\024=\334\260\356:\350u(\274\334~\266\273\341\342;\273\331v\336\274\021\020T<H\334f\274\020\377\252\273\031\342\353\274\220\0318\274,\234\034\274\317N\240\274Z\177\234<\2161\322\274\3237\335;U\373\245\273&\225\376;\254\177\334\273\023E\300\274\231\351\314\274:\305\312<u\305\030=R\351\372<\370.\266<q\230\320<z\274\366\274\0370\213\273A\303}\275\241\033\275<\352\030L\275ayj\274@b\245;\030\230\365\274\214\275\n=a\010\374<2\032\307<\235\344V<E\036\220:\216\346\267\271U\".<w\214\323:\334A\0179O\017h\272H\234\241<\277I\n\274e\350\010\275\266p \275\306%\303<\321\330D=:*\332\274\377\335|;\213\243\027=\2365[;\023\233\010=\232\326L<\021\205\223:\353\027\315<H\267Z\274G\354\013=`og\274W+S\274T\370\340<\"\302.<\312\t\215;W\3413=\266\241\223\273\017\341\032\273\217\003\013\2750\021\201\273_6\024\274\220\243\n\275f\022\010\275\335Vg\274\337\267\274;\247\020\255\274\234\344\036\274\246B\033=t\373\362\273\352\n\260\274\343\202\260<\023D\265\273\241\362I<K\270\220\273\014\341\036\275\232M!=\355\335\354\274z\212\036=\321y\235\272\250\304\201<<\252\201<y7\264</V\001<\3040@<\346+\007\275\016z<\275h\r\367<\322\302;\273\034\312\016<\035j\002;\005\276\033\2746A+=\205i\336<\013\231\201\275D\024s\2743<\325\273\013\310\031\274\037\033\367\274\252\235\251\273\3337\237\273\225\340m\273\331 \246\274\335\226\n<\276\311\301\273\016:\243<N\340\002=\256T\277<\2640\022<\367\nv\274\211\274\244\272\302\320\242\274 \314X\274\246\347\204\2735\313,=\217\373y<\\\236\207=g\327\351;+\334k\273\227\242=\273\257\244^\275\206%&\275\301\352\t\274)\244\200\273\357\366\314<\3358\230\273\000K\376<8D\017\274\364\203\316\272\366\256\367\272\326\024\016\275C\222\256\273\322t\035=\205[f\273\211\365\037\275W\253\000\275d\342,\274\225$\r=A\273\203\274\241\330N\274|\030\032\2758\375\221\274\021\230q\273e\207\203\274\227\353h\273\250\314\307\273\035\036\220\273`\316\025\274\025\246\r\273\242\035\203\275l\r_94\214\323\272Dy+<Z\372R=\275\315\313\2733\006\034\274%Jn\274;D\212\272\230u\323<U\014=\273\257 
\031\274\022\260\r\275\006\030\020<*\017\265;\305\007\000=\355\333\244<a\335X<\243m\257\274\036\346A=;_\303:\320\327\001\274\320\021\304:\351Li\274?+\225<\243p\005\275;\333\n=b\230k<q*v\274q\364\366:\242\254\321<q\261\033\275\221\336G=Me\026<Jp*<\032\023\311<+\r\345\274\262Si<,fq\274\342\265\014=\215\212\360<\355\310\273:\323\274\210\274\033b\005\2749\376\223\273\340\013x;pr \275\301\037\337\273\335p\303<gV\246;\201S\314\274D\032\230<\267|\270\274\003\273\256<J\342n\274\206?\316\274\"\331\000\275\321\225\251\274\2622\251<\366\\\244<\025(8\274\027A\036\274\344~=\275\244\304\022=e\001_\274[\177{\274|\207.<r\251\340;/d\214<8\006\211\274\337\203\204\273\004\001\027\274\314-\325\274\304\363y<\010Z\264\273\370\252\272;3\250\006\275\274\rW;7\221\275<4R[<Dl`<^\343\265\274#\253\373<0m\317;{<#;\330_\311\273\203X<\274\225\204\342\273\337\305\236\273\017\260\036\274\032d\214\274\250X\255\274\323[\020\275\367\250|<\231(\321\274\277\360\021\275\033\271$;W\224\377\273\244\371e\274\020]\317\272\331\2637\274;\016\031<\306\016\004\275\247\024\365:\231^\214\274\232jJ\274\350\260\262<X)\225\274\337\035\324\273/]|\274\251l\034<\035\215$\275\314\347\212\274_\373m<n\273\026\274\257\010=\274\271#\237\274\320Y\340\274CM\354;\271\350A=J\376?=\314`\240\273\223:\367\272\232\037\003\274\322f\037;>\226\264\274\343j\234\274\224\355\220\274\304\202\031\2753Z\237<\035\251\016\271\277\247\253;\210\253\033\275\277eN\274$\266\262\2739\207\325\274\244u\263\273\311\034\341<A\202\026\274fv\'\274H\366\022=k\317D<\\\251=\274\341@\021\275\215\335B\275\005.[<\312\335\005\275\330!}<\177W\017\275\266\256\003<\306\356\236;\206\337\035\275\035\352\360\274f\346\244<D\231\353\274\233\252\210;-\2329\273b,\277\273\352\370\254\274\306 \354\274\266\300\361<\334\351\245\274k\311\353<Q\366\033<C5\273<\320\222)\274\232>\023\273\360\r\230\274\254\315<=\004\264m\275\026\354\243\274\306dd\274\001\310\304\274\214\377\221;\210\223\257\273\003\\\001\274LT\"\275\324f\037\274\3777\316;H\222\003\275{\256\224\275SKp<s\023\246\273\250\375\036\274.u\357<QM\020\274\264\242\363\274\320\246\357\274\254u\215\274\361\2275<@\324D\275|&\022:\207\020\3678;\263\317<\n\343\306;\210=}<6I\223\273\243d\356;\2101\241\274\247^\343<EA\302\274\244\241\377<\314\372;\275\347fx\273x\337\226\274\335D\237\274\030_\302<\036&\210\275\225\230R\274\016\210v\2742\341\254\274\021o/<\316~\026=Ih\245\274P*6<<~\005\275\367I\030\275\227\t.=\3262\276;b\006\227<Y\214\307<nW\307\272!R\332\273E?\006<\017uS\274\017\333s<\257\'\003<\371\005d\274\3334\232\274\274\347\202<\231S\262\274\200\207\357=?s\345\274\320W0=\345x\031\274\310$\345\274L3\221=4\0052\274$\304\263\274\034\330j<.\007\262;\000z\226<\227E\206\273\2647!<\305\235\313:\352\210\260\274:\274\253<o\212\345<\225B\022\275\"\232\020\275D\246\022\274\2263\252;i\215\2049\347%\307=\310\213;=\231\013-:\305\301\275\274\003\261\017\274\217\205\374\2746\240\244=u\260\231=\324F\320<~\377n\275Y\202\221;\023\345p<\334\222\365;!\217\325\273\324\201s;\302\243\033\275\'\217+;\312\355\356\274U\325\221\274H\365\354\274\223\no=\026\345\227\274f\221\335\274\302\217\276\273\317\362\363\273\t\2048:\226\n\\\275lz\027=[#6=\362cD9\013\2025=[\221Z\273\005UH\275\205a\247\273\317\027W<\306e\250\274\256!\036=!\033\377<{\001N=A,\313\274Q\247\242\274\355\367\026\274\346\346\234:\247\033P\273p\216\365\274\r$\311\274*V\267\274!VY\2747\020\307\274\316\217\006\272\356{2=\375\336/\275M\272\001=\235\335\271<\307P\'<\023+\266<\304d\"<\205\361/\274\302\263&=\333\010\267<.@s;jE\237;\357\212\275\273\0366\020\275\260\233F\274\010F\255<~\262\'<@\344\324\274\273\355\007\273\202\0
07\265<\004r\031<\302!\255\273\274\255t\273\232\220.\275\273z\354\274\264\273.\274wA\026\274\302\337\034\274>H\274\271\240L\341;\223\217\237=&:\021=\322\2021;\236Xw\275\343\017\262:\256\310\225\273\035\314.\275\275\253\017\274\234p\024=\364E\225\273\211.{<.\261]9\232\034\213<L\215\240\273X\226\351\274\357\005\035<#\0321=\016\355\377\273\223\333\023=\033x4\275\247P_<\374u}<9\033\206\274\003+\233\274\022B\363\273\347}H\274;c\322<\307\\\007<\273\240\223<0J\010<3(\334\274?\230\031=\233\027\373\273,\274t\2739\014\002\274\310\"\225<%\367\375\274{.\375\274\3176_\274R\310\035<\257\200\347<\246\351\276:\346Y\036:\200d\345<\373\270K\274\264a&<y\363\017=\332\026\221\274\344\311N\273n\216\035\274\312kH\274\022\251\341\273:\354\017\275/\217q\272\214\2232=\223=<<~\346\020=\303\257\310:\300\372\273\274\026\346\215\274\325\010\325\274\237\307c\274\025%C:\374\236]<\254R\304\274Py\2677_X\n=K$\006\274t\2553\275\3302\207<\300\0328\273\200?\270<\214\375\224<4\270\333\274\340hQ=f@\030\275\262\003G\275i\036\373;4\350\206<\231[\340;\323\271\272;*f\001=\365\320\017\274{\330\010\275\"(\017\274\350\256\314\273\257l7\274\026\337T<\331Z1<\352wU<C\016\247\274\3167S\274\334*\033<\035m\366;\351\204\200<b\304\225\273K\313\005\274\364&h<\3116P\274<g\366\274\300_Z\274\205\005\322\274\003\273\220\274\\>6\274\362\375\361<N\202\334\273\324q\262\273\315\346(=\335O\317\273\224\027\246\274\314\267\261<a\006\215<h\251\245;\214\371?<\324$=<|\312\260\273;s\220<\274O\302\273\236\362>\275\317\021b<3*\025;H~\320<f+(\275!\027W\274r\273\376;\177]q<\252\235\242;%j\301;ab\032\273\341\267\324\273\262\347;\274\326\221\366;\235\241\254<\301\021\270<[\211\022\275\216\334\005>\224\026f<\030\324\331\274\222\025E=\024N?\275D]\201=\013\355\220\274\345\216j=Uz\002\274\366(\013\275R\\\371;\253\230D=K\265\216<\307Q\000=i\277\315\273xI\337;\255D\033;\243\361d<\237_6\274\333\336\351\274\372.\365\272\342\222\260;\"\304\277\273J\357m\275W\333\234<\033\332W<\374\332\223\273\222y3<\177Y\240\2738DU\274\256\257\025\273(\2663\274\323^E\273{\036\352<`\341L<\361%\324<\2354\277<S\352\253\273\322T-\274\\&s\274Z\233\023\274v\'.\274\301\343\300<\017\235\340;\2332\326;r\205w\272\267\313\245\274M\312\035\275\357\345\242=z\002\302\272\t\031\344\273\2100\357;\2611u\274\224e\034\275\224rr\274#O\375\272z\r\235<\005\266D<%Z\200;3o2\275\336\2055\2753U\351\274\350\242\327\274\303\307-<\373\350\316<\356\014\336\273\246C\240\274E\"\241:\010\210R\275\016\206i<\214;\313<\344\352\316<Z\306\321:\321[\257<3I\271<\010?R\275\010c>\274p0\210=\356\367\337\274\243\324\026\275\370\362\213\274\031f\207<:\204\325;\020\243\343\274\355\265?\274\230\330\313<\270\351\024<P^\221\274Iu\261\274n\333\275\274\272\233L\273\026\355\221\274\264\310\001:@A8;1\226><M\310\005<\302^x;\303\327^\275A\365\334\274\025\377\203\275Q\035\237=&\034\204;\341\363\302\274\314\026D<\366\375\341\274\276*d=]\031\025=\377\304\374;c\341\"\274\361:\215<#Z~;$3\263<\0237a=\013\344\275\274\345\301\216<\035\263\2539\251\236V\272@A\347<\022\036W<\342\356\350\272:\3245\275l\001\344<+ax\274c\226h\274\214\243~\2743\321\264;O\032\007=\232\202\t=|\254\200\275.F\271<\374\231\013=\200\337t<\202N\005<J_\235\274.\302\265\274f\013q\274\371\237S\274\225r\322\274<\222\001\274\334\256\307<j\221\354<\321\002t\272\266\'\307<U\261&=)\007Q=J\343\271\274\303\231\300\273EO\302\274\331\310\034\274\2073\004\275\031\337J\274\227\317g\2748\2669=\347C.<i;(\272\355\252!<\367H\013<\235T\262\274\326\360^\274Eo[\274\313\306G=\273\364f\273m\177@==\363\000=\3317\365\274\371^X<%\327\201\274\275E\225\273\377\323Y<B\200\t=\270\366\301\273E\313D=\225|\017\275\
373\3243<\304\350\';\351\264\353;\356R\027\275sX\022<\333A\327<\373\330\303\272\254\320*\274\206\324\316\273\r\246[<\221\211\242\272\325\273\337\274\2037p<h\222\276\271\355\023\237\274G\265\235<al\032=->\010\2758.\377\273\t\024\214\275\205\373%\275\223\300\244\274\271g\232<\365\227\251\273\010\275\203\274\031I5=\326C\213\274n\205\301\274\341\372W;\265\332X\274\316\320]=\017\233\233\274x\267h\274\231\n\267\274?<I\274\241\370\235\274\315\357\232\274l\247\234\273\355\227\331<^%b;L\3336\275\23221;4\236\'\275\207\240\3569\001\361%\275\273P\233\273\330 o;\332\224\352\273{\207E\274\002dH:@\374\022\273`\334b\274\317\022\252<AQ\230;?H\215<\235\003\030=]\324|\274\333a\341\273\310\016\222\274\213\014)\275\031\375\313\274f|L\2750B\272\274t\364g\274?\310\233\274\\\312\266;\n\222\217<{\354\327\2741r\211\274&\000\343<\224\264\345:=\366\245\274\335\020\037=\032\005b\275g\311\267\272\263Fd<\300q\212<&\303\211\274\237Js:\252\302\000=UW\216\274d\335j<U\327h\274/\341\307\274=\014$\2759D&\275m2\212\274P\335\331\273\272\351\336\274\311Fp\274\302\377!<\310\034\201\273\'8\325;\025\245\204\275r\234\000\274\230\006\333\271V`\226<\351\3347\275\3212\266<k\240\220<:\323|<\230\023\200<\226\200\232\275\351Z\320\274\360#!=\206r\260:Z\025 =l9\364<\317\230m\275=\tD<\253*X\274\026\321\300\274\211\036\256=FP\234\273hg\017\275\027\020c\274KtB;y\t==.N\261\273\341\330\355\273\366,8<\310\270>:A\301\335\273\244\276\234\274\320\265\343<\2003=\274\000\354u\274\225\273#=D\357\274<t\345\017<*\014\215:\376\'\262:A\320\330;l\302\250\274\367\214\320\274\267\223\204\274\320{\004\275\211\261`=\023:\227\271\235\222\251<\007:\345<\"\320s\2744\242\265\274B\225\301;\207Q\n=i\254\205<\206lB9\260sO\271\344\3768\275\000\216\255<\221G\314<\005^\004\275\356\036\314\274\214\352\200<\216d\224\274\027\212c;!\324\263\273\322\355\300;d\356)=\320\022,=\326^*:\036\2344\274\353\'\020;\221\341\315\2745\345\253\274\2003\306\274\034\035$=\217n\006\275$\330H\274\024\237L<@\253\t\274\315X#;\002\207\370<\374|\013=\017&\206<\'\273(\275\327c8<Q\277,\275\001\252h=K\275T<<Yb<\nA\301;jS\006\275\3203L\273O\337\343\273!W\235\273%\311;\275\251\3719\274_e_\274!\206\025\275\027p\346\2743\"\3069\354\231\372\274\305\370\373\274UR$<\245\211]\275\240\377\376<i\323\325<\210\216\000<Cp\256\274\374{\314;\242\346q=\275\261\'<\271[\223<\255\020\214<K\352\035\274(k\236<\\\245f\273!\207\237<9&\355<y\211\034=\244\004\354\274\236,.\2734\327\304<\2631\374\273J9<\275\366\224.;!\024\027;-[\316\274\310\222\364\273\217\0061=[;4\274\221\252Y\275>Y\314<\207.\236<\315\'\353\27217\370\274Go,\274\215\0044\275\360\222\001=0w\265\274\275\333\273;\275o\'\275\'\030\241\274Y\252\025\275R\350\025\275\2746\235\274\316X\261\274)\034\007\275#B%\275\016\306\274\274-\361\201<Rr\364\274j\340%\274\234\016\330\2643o\312\273\013 \326;3D\242=\326\221\360;|\300\375;\245\002D<\252_|\274\371*\275<\203\243!=5\370)\275\204\252\353<g\017J;\324\262J\274T\352\007=)\261\265\273\360il\273\235\034\311;\370\227\264<4t\321;\257 \010\275\210\215\013\274F\022\231;\305\037\317\274L>\n=J\326\323;\301hB=\342\240\233;3L\017\275\322\000*\275\223>l\274\302\262+=\330\324&\273\206\333\032\275\276\004D<\027\233\316<7\355\244\274h\361\264\274\257\237B\274\021\247w\273S\002\203=\300\340#;\322g|\274+|<=\340\227\205<\276\001\002\275\000\r\261\274Z\272\320<\273J\212;\335\361\276<o\252d=3\264\211\274%:L\273\233]\271\274!\215\r\275&y\006=\342Q\026=a\205\265;\330\304\207<\007\360y=gFT\273\226\004\202\274 
\273W\275!i\362<\314%T:#\r\245<\324\367\361;\301N\306\274dB\355\273\216\200\256\273=\003\267\274Y\217\232;T$\223<\026\336\031=\037\232r\274z\335\332:\341\342\252\274Z\355\025\275\331{g=\337>\300<D\360\300;x\205\036\275l\2769<\373L\310<\367\2717\274\223\241\n\275\327\300\372\273\350\367i\275w\250\035=jS\242<\222\333<\275\21226\274X)C\275\2046?\274\247\035\226<\031\275\n\274z\373\034=R\306\000\274;F\";\360\3341\274X-\244<\030\334\354;\241\301x\274[v\252\273\306#\010\274\204\267\335<\316O\261<)!\336\274J\023#<\001qW\273\217\330\237;\376\354\022\274\t\311\251;\372\325\206=\"W\301\274\374}\253\270\256\020\260;+wA<SNc<a\215U\274\370e\312\274g\220\016\274\2414\037\273S4M<\320\"4\273\033\214\035=yN\275<\301\316\2669\252\253\000\275Q\343\023:%\377\202=0\357\202=\203%8;\311{\002\275\334\220S;/\021\312\273\260\367\230\273>\021\323<\215\311\214\274g\022\001\274M\214\244\272\300_\237\273\235T\256<j-\004=\203\335\027<[\236`;\247JM<\007\236\024\274\363c\227<\032\365P\275\034\\%<c\371f\273_s\361;\201\312&\274\342\242\271<\235y\333\274\306\276\004=\227 \013=\273\247\035\274\341\320\307<Zv\236\274\330\003Y\274~q\307=\032\375=\274\0068\240\275\301\264\204\273Bu{\274\345\227\r<;z\024\274\304\021W\275\200z\260\274\001\266\222\274\354\253\010\275\262\313\232;%\322v<\000$\327\274f\322+\273\033\230\021\275\363\000\240\274\245\313)\2744!\335;j\257.\273\037SN<\005\016\252\275\010m\301\273\035R\241\274\352\254F\274SC\366\273\013\303\351<0\335\267\274m\025\t\275\204\002\020\274\320 \235<\333\242:\275}m\361\273\334DA\275t=\222;\375\220\230<\264/\234;\022Uw<+\363\225<\344\233m\274\374\313\357<?+k=\316\271\264\274\336:\226<\n\251\002\274m\027&\271\243\335\307\273}\343\n\275\334~k<\207\223\326\273C\245\352\274\353)\345\273o\342\323;]w\244\275Y\217+=+ag\274 \203\344<\303.\374\273\216\"\271=U,N\274\256\366\211\275l\266\372\274\377{\t;\013y\035\274\265\231\201<\2273\314\273\224\216\376<\235\207\220\274\242\373G\275u\261\226\274\340\324]\274\274J5<\322\216\007\273\26252\272\334N3\272\270\210\335\273Jd\221\274\270\010#<\2079\211<~\366\232<\033\242a<c\313\001\275>4\014\274T;\030<\223X\323\273\030~\312;\217&P=]\000A:n\243\214\274qF\235;g\223\321\274F\354\216<|\277\316\274\005w\254\274\360\320\016=\201\304\234<d\371\003\273.\213\235\275\016&+\274Y\337\005\274\205\223\271;\031\350p\273%\256\t\275\330\266W\275%\341\005;\214\033S;\335\233\243<\207\255\262;\310\342\204=\357Fw<(A\221<Lu\227=\340\344\367<\324\302\247\274*\t\t\274\360W\200<\nLJ;\230Z\017=m\r\017\275B\t\331\273n\263\274:u\2735<\032\3715=\'\034\321\274\366\023\022\275\000\360\003\275\266\214\226;\264\365\203<\303\362\032\275\350\0315\274\275\031~=Dk1<\260\023<=B+\340\274\337*\2319\266\235\304\274\343](<\262\035V<\241l\223<\236U\030\275\216\253\220\274\201w,\274\2149\231\274Ze\010\275\344\334\311\274M\305q\274&\006j<\257\376\002\274u\375\327\273\306m\006<\2753\221\274\251,\351\2749\007\377\274?*\0279*\312\320;\375\037\'\274S-;<\344h\n<\230\221\250\273\200;\242\2735f < \034\331\274\253@(<|\356\312;\354Dx\274\024\336\t<G\377w<\030O\225\274\233X\245;w;\244<S\026\242<Er\252\273Brl=s\343#=:F\'\271J\320\272<\021\205e\275\322\316)\273\031\334\221;\346\004\035=\243v\241:\023%\316<\226! 
\274\0368&\273{\177t;\r\024\227\273\240s\021<\345\371\322;\232\341\323<`\206\006<\364\023\023=\0357\325\272\000\261r<\355l\267\274\rN\353\274\3103\312\274\262Og<\212CO\274\2223,\2740\006]\274E\376q\275\374\226\013<\262\226\245\274p\021\215<\243\345\2629\367Z\230<\306\016\222\275\261\215\345<\317$\016=E\237\346\274a\313\";\231\206\213\274j\232\2379_)\270<0\007?;\t\256)\275g\365\204\274\233\243\340\274\227\221\250\274&h\267\274\323\034\r\275[\006H<\354*g;\343\367V;\273\267\035=\262\252V\275\362\n\005\273\277\031\242\274\273\233\024\275\022\232\021;$\246\362\2740\023x;\232\345\005\274>\254.<\\p\356<\345\222q\274^\2664=\013F\375<\315s5\274\235\031\333<\324o5<\360{\272\274.(%=&\364\355\272b\312E<\245#\017<\211\246\345;\341\207K<\356\212=\274EL\202<\371G_<\203\273\'=\260\365T<CY\251\274\304M\306\274\261CZ=\310Y\230<\010Q\000<\363\241\222;\257\212\225<\021WA<\306\010\020\274\203s`<L\366~\273\374\241\245\2758\317\027\275J\033\033<K)\240\274\3208\341\274\245\372\247\274+z^\274\212)\315\274b\3212\274\2374\260;,8\313;\006\210J<\tl\313\2746\010\214:\212U\273\273\035\273R:\326\213\245\274C\274\234;\203,\313\274_m-<;\032\356<bk\334<\312\273\201\274c:\374\274\246\2065=\t\270)=y\001\300<\320\325\231\274e\370\264\274\317\204*=\343\246\241\274\310>\036\275``<<zd\336;$\027\242\274\236+\276;o\272\267:\023\341\216\274\001\002!<-v\241<\000\350\273<\252\032\006\275@\202\026;\232\035\014=\257\000\206;\023\346]\275\351\354\240\275\020\021$<\27189=\277\214Y\273a\351&\275E6\034\275\206U\0009\246\tQ;\234\317\273\272\t\235\262=\324\354\t=G\256&\275\203\314*=\330s\264;\221\361_=M*!<\355\263\245\274\305T\312\274\027\021v=\000-\345;\022\2143<k\356\262<\301\302\023\275m\210\251<\024.C=\305\356t=\314dv\275\242\023\311\274\324\020A\275|w\376\274\271y\237\274<\000{<\270!%=k_\037\274\300a\027;dB\010<3N\356\273\367\363\255\274]\356);\002\3668=\262@}<`\312<:\2510\020<\240\336\223\273\253\346\357;\353\314\235\273b\236\207<\322\304\236;\306^\032\275\220\342_\275\271=\246\273\216\223E\274\363\027\260\274<\204\"\274#\241\024\275\217\034%\275\214\233\200;\252uy\274\314ip\275\217\314\325\274`|\310\274O\355k\272c:A;-v\244<Z\360\354\275Mq\225;:\325\013\275Ro\001=\257\322\031\275\031\210\371<)%\325\273N!s=\277^\207\273:\332\034<\034\330\234\273d\270\221<`\347\217\274K\253\274\272=\334\027=b\355\014<\354\352\240\266v&g<1\027\3439\326\216o<$f=\274T\353\362:\033M\331\274\024\232@\275.\307\014\275\312\3256\275\354i\342:\263s\017\273\000\352\177\274>\'\306\273\r\252\236\273n\231s\274\207\3067\275\263;K\274\270\030\272\273\244b\330\274\021\325\222=\017X(\275?\305\201\275\035\372:;\353\245T<\276\341&<\366\"Z\275\345\\3\275 \000\272\273\003\020\302\272\0351l=\227\267\354;P\017\032=*\0259\274\275A\021<\'~%\275\203N\266\274i6\330<\356$\361;5\365\237=u\247\007\274\237\223\323<\374\267c\275\203g\264\2739\245\342\273\312A\266<O\022\243\274\353\022z=\366\232\325;\302\016)<R\031z<\224<7=\203\242%=\373\016R=\207aG=e\230\204\274\350r\223=\343#\357\274\351\253\304<x\267J<\301\033\253\273\213K\211;\335\026<\275\255\006\024\274\343\327\037<\340\347\360\2747\374I\275\374\223G\274\220{\016\275~ 
[Binary data omitted: this portion of the diff is a contiguous run of raw escaped octal byte sequences (serialized floating-point tensor values from a model/data file added by this commit). The literal bytes carry no readable content and are not reproduced here.]
11\006K\2721%\234<.\262\013\274\332\004\030=\022=\201\274a7\227\274$\374(\275/&\030;\375\212\237<&\247\361:\345\264R\274\242\271\"\274\316\301\233\274\n\270\260=\241xo<\217\270\306\2746p\021<\324\363\352<3\007\255<\335>U\273\031A\212<\347\354x\274!\032\220\271\031-\004;\323\304\331<\252<\242<\217\016\305;\001[8<\274\356\264<62\320<bU ;\244\350G<g`\206<\n\364a\275g\324\017=\036\205\237<<\346&=<F\262;\246W\377\273rf\306;\233\255\014=\345\203\342\274\375\004\370:4r\230\274\254\310\205:\262\223\217\275\033\021$<\207\353 <\323?c\274#j\353<\225?\256\273\213\3744=\374\275z=\355t\024\274\272\323J=a\005\021\275k\r\005<\213\016\336;i\257\027\275\323\352\341;\212\257\002=\321\355\376\273\234.\010<\333\027H<\025*\351;\222\353;\275\375\027\325\272\026\351\006\272\313\357\250<\376w@\274UE\257;\237\210\235\273.e\001<7\232\023\274A\3755\272\345\225=\274b{\236\274[\035\205\274$W6\273\177~\375:_\004u\274\2020\366\273\273[6=\366\261~\2742\301Q<\207\217\252<\243a\207\273L\366\362<4~!\274ju\360<\212\217\307\270\272\263:<\032\360v=\254jh\273%\272w\273\"\177\310\274&\256D=/\2337\274\031\251\213<t\310^;\256\005\333:\276\371\307<\347\246\007=\353\014V=z\"I\273\220\302\217\274\3071,=\276UQ<\331\355+\274\201\020\030=U\200\006=\211j\214<wHj\275\027<\373;-\371\262\274UWH<\376/\245\2732I\034\274\300\225y<\035\247!\275\212#\335\274M\240X\273,\270%<\261\224\373<\027\206\367;u\r\272\274\2774\033=\211m\264<\023\277\357\274\024*\255\274\323_(=\235\2067<N\216\036\274\033\231D\275\222V\013\274 T`\274\356\212\265\274\225%\353\274\253Z\036=\345\241\204\273\016\305\253=U\261\005=W\370\365\274O\341r<\252\374,\274w\343\366\274y\205\026\275\245\247\337<\346\360\335\273]-@\274D\024\212<\326\001\254\274(K\220\274\346\177\232<\177_\371\272\017\232\267\274\357t?=\251N\255<\230c\n=\370n\264\275\\L\363<\031\214t<\317\214\263\274\024#\301\272\231\270\371\274\335\370\255\274F\234\262\273l\326\'\274\203\177\272\273g\371\355\271\270\377\027\273\365Y\310\274\"\364T\274\231q\003\274\021\302q;^\315U\274\330\212\237\274*\3345<k\221\027\275\326\304\256\272\301|\207\273\326=\270\274^T\262\274\303\255\016=\300\355\005\275:T\372\273\331\355\260<\2126\306\274oa8\275?\243-;\261i+<\272\362\264<\300\367\202<Xo\006=\375}\004\2746\222\027\274\341\306\256;>\350(\274\333X\000\275w\243\377<H\3634=nR\212\2745Y ;\347\344\221<~\033H<\330I\245<z\304\207\274\307[\323;\002\230\305:Q+a\275\227S\0207\341\315B<\231\255\246\274\211\362\305;\274.\272\273\320qJ\274\342?\014;\202\367\210\273\271\215\004\273\311[\005\274\214W\01482\245\253;\022cj=6\036+\274v\264\311\273\335\202\203\275\203\r\347\273\240\324M<\347\276\331\273\223\3554\275\314\316\265\274\243z\000\275+B\302<P\020\2269\3155(=\230Z\314\274n\020\323\272\024\037\312\274\247\275\001\275V\344\213\274\302\323[=\300\271\221\273vf\252<PN/<\022\0170\274\020\022-\271\316\200E=\344\232\206<\250\307\353\273\274\232L\275\026\244o\275\343\232\352<\005\353>=j\347\233\274\236N\273<\317\330\364;\314\317>=\035\215\277\274(u\365\273~\211\264<\254\370\235;Ca\002<oN\t=\271\006\343<\366\261\231\274j\035|;\366\333i<\356rM\274\316?C\274:\204W9\313\312\236\274G\317\202\275\220\033\331<\002\233\'\273\202\000\330<QX!\275\035\017\352;A\271\271\273W\033\034\272b\243P<\233<\321\274d\271\323\274\034{\275\273\376\310x<\374R:\275\263\203\260<\030\321\252\2747\023\207\275\032j\323<?-=;P\323\247<\033\t\275:\252)\210<\340\255=\275\331D\271\274KU$<\337\016p<\317\361\326\274e1\t\274\367\271\r<\266\376F\273\317Z\325<\213C\240<7\357 
\274P\373$=\217s\226<\037\360$=~\211\274\273r)\335\274\272XW<&\233*\275\205\"!\274=\010\n\275\333\032\r=\306\006\216\275\340\203\r\275G\375V\275\024\3445\274\274H_\275]\327r\274l\310W\274l\263%=\027\030\263<)\301\316\274\213\361\177<`\030\260\273C\200\211<U\335\217\274}\232^;G\341\034<\242\035\307\274\203_?<\2056\211\274p}\t\274\360\003U\274\274\203\034<\261}5\274\335\326\222\274\027w\301=-\025\001\274\377Z4=i\337\2229\340\224\317\272\335}\035\274\344R\177<\374\322Q\274\253\326\303<\226k\344\273\334\345\335;\320.[;\025p.<\251F\314\273\346\311\002\274s\216\274\274\262\270\277\274\253S\260<\365q\323<\347VY\274.\255\034<1C\316\274\241\3105=v\300\267<\036Sg<Y\322\264<B\203F\273\243p6=S\340\302\274p\372\205:\216\213\301\273eb\263<3Q\201\275\211\260\367\274\262\324\033=X\214\370<\347p\r\275,\304\334\274\301M\n\274@\316\031\275\230r;\274\035)\017\275i\365\236\273\206\003\216\274\033])\274\314\016\224;0J\206\274\260N\260\274N]\215\275;\2112\275)\327\212<Y\2071=u\344\225<n\316W<7\324D\275z.4=J;\234;\261\007 8\004\327\214=G\275A\274\365\276\370:4\251\222;>0\250\273\2013\222\274\337\307\355;\350n\240<\373>\201\274\206\\\037\274\204\037R<\270\002\253\274\251z\316;6\253&=\346AF\274\001\303\304;?@\230:\360\032\350\274\200E\200<\231\260\276\275H\342\250\273\337\322\312;\302\315\225\274\305\221Q;\331\306\246\274\364\374\221\274\246\320\220\274\232|S\275.\307\3637\032/\001= \036\222\272\211fZ\274\020\\I;eT\314\274\354\031@<\343\240\021=\3609^\275\205\204\336<\306\035\230\274\277,\t<L\377\\\271+\236\032<O\025g<\240\210\364\274{d\302\275~`2\275@U\356\274\264\321B<n\002\210\274\242\335\325\271I\312W<\260\230\326\273\263/F\274Y\221\001\275\275A\323\274$\022\205\274\355\210\211\274\375W\330\274\227\\9\273<\022\027=\307\336\004=\035\353\206<\246\331\310\273\362\317\302;\r\355\024\274j\330%\275\333\020\316\273, o\274\177\210\275\274\376\0364\275/X\016\273\335Z\203\272\361\326\354;\305\335|<\341\306}<{\312\357<Xo\242\2742\234\316;\316\217q\274^g\255\273\317\244\003=,\"\314\274\306\217\024\273&\201\314;\026\272\227<\332\022\226\274(L]<~\204\233\274M\036\207\274\230\n\216\274v@\024\275\255\242\034<\036\322a\273\215\332\224\273\231x};\316\232C\275\266\030K\274\277\231g=K\0145\274,2\327<\347$\374;\206\234\032\275\263\027\031=\0052\216\273\2771+<V\226<<2\333k\275!@\035\275\363L\207<\352\312\006<X\017\313;>\361\215\2748#s<o\206\262<;\317\357\274 
\023\354<\255|\022\274?\030m<\347p\316\274\374\213\324\271e\271r<\245\256\034;2\202\206\274f\010\213\274\240\177b=\247\327\267\274kJS\274\246x\367\272f$\271<\2032\010\274U<e\275W\030\027\2743\205\007<zo\321\273\034\010\206\274[\236\217;\337\033\264;@\376\212\274\2070\363<\325\223\351<\211\0308\275\322sJ=\317\350,<\214\331\253\274\231\313\177<\036\362%\273o\013R<\344C\022\275\016\271\313;\016\'\014\2749\3026<\203\344B<\343\032U\273\375\222D\274R\225\265:\247\212\032\274\3138\226\274@\014:\274\271\016i\274Mg\257<\024\355\265;\321\222\r\274\r\006\246=o\273\350\273\372\026/=\207\262\t<\311\021\010\275\251\346\260\272K\013\265<\017\272\300<J\244\253\271\215?\345\273\360s\350<\203\376\335:\346\2003;\035\255\031<9\2237\275\200\3670<\246\301+\274\325P\350:\376\264T\274\370\203\t=\346\007B<x\353Y\275\221\020\202\273\013S+<\007=>=\006TY<\257\033\004<=W\265\273\315%{\273\"\226%<\035\236\312<\307\034\214:M~\017<\373,\273<\326zh\273Gr\253<\234\030d\274\246\026M<\254W\t\274\020L\237<\330+\367<\2238\022;\3156\313\274\034\312N;\3057\224<<\373\215<\254\323\340\274\211\214\313\273Xz\205\274U(\334\274+\321\325\273vZ\005\273\336(v\274\212^\333\272amQ\275s\255\261\273\214\340#\274.\237\351\274\210\216E\275\271\325\340<\211TE\273F\017\355\274\3603\205\273\321\025(={w\205\274>\353\251\273\030M\203;\274(p\274\034(B<\370\206\307;0bO\274\306\317P\274\2461O<\314\305\271;\200%\340\274\234\016\027:\037\265\n\275\257\3167\273A\020M=\374j\244\273\373\215\323\274S\335\306<\262\303t\274\343m\264<\202\010\303\273\202\024\331\274\201j\300\273\367\246\354\274):_<H\203L=\317q\006=\334Q3\274\034\377\370\274\252Q\253\273\371g\263\274C\234\271\273@f\221\272\227\221\026=i@\n<,\t%\275r\223\246\273\205\344\231\274i5\332\274-\213\021<\000#\333\273n_\312\272\232\005\320\274\364\204\252<\002\345\312\272> _\274\r\nm\274\351\336\335<\364T\312<\235\252\335\273\257\302_;\270\365\016\275\265F\353\272\376\300\3239\024\277=<1<\225\274\370\030\377\273&$\017\275du\306\273*\257r\274\330x\220\274\365\242\207\272;=\223<\360\335\221<\201\240\005\273\264\203\335\272\247*D\274*f\216\273\303_;=\255\024\025;a$L<\213\251\232=\025J\215\274\374$\017;\310\274\230;\255\307\210<{\233\252=\3569\305\274\375\010E<\331p\322\273E\317\235;z\365\235\274I\236\027=\263\221j\273g\214\242\274J\301\345\274\340\253\200\272T|\313\273%\263)\274\214cO=\006*r\275\006\260\270\273\272\352;<VVo;\001K,\2749\217M\274\017\364\352<\332\221/<\370\2344\274I\023I\274q\314\340< 
;\321\272P\324(\274D\340\202<0b\007\274\364%\217\266\372\312\023\275\215\303\363\274\323\372u\274\334\266\243\274X\300\000\274T\261\210;v\216\003<\227F\324\274\350\231\010=\005\250\226\275\204\345\024<v\3441<yc\022=9\334\203:;B2\274\0325\005\275\323\255D\274\236\366\215\274\001>\337;\203\\\255\274_\025\337;\017e\267<\034R)\275q\005\207\274\241[\004<I\t\002\275Qo\332<\004\336\267\274\322\220\017<[\235\217=<)\305\273\020\\;\273v\022\032\275\217I\340\274\000\210\202\273\365\307X<\332\207\216\274>\350|\273\222DG\275\204\247\335<cs\t\273\322\200\007=\000\256\246<\373\037\013\274]7\2659\251L\206\273K$\306\274.VJ<.D\014<\0302\271\274^\317\023\275\331.\337\274\350\224d\274\276A\364<\346W\227\274\001\023\013<\206iA<I\0258=J\002\340<g\337\372;\252\010m<T\n\224\274\036\034U\272\353Y\215\273\225\316*<\275\253\264<\357\201a\274.#\241\274\235\264\352<x\256!<<\303\033=\214\213@=\350\240\264\273}\372M\275\343\230\234\273\336^\220\273N\030\013=D\205\221=\251\232\022\275\271\276\004<\206\'\205=\0043N\274z\376+=\247\214\311<\014\004\';\275D\252\272\212\205z<:0\022;CF\202=\002~\344\274\231\276\250<\240\213\275\274\347\274\333\274\256\0132\275\177.p<q\265b<\260Z\3539\264\\y\273=a\266<\315(\253\274\242\\\021\274\335\273\223;>v\215<\035\256\003<S\244c<O\312<9\225\313\345\274\2770u\274\306\010\364\273\373\036l\274\271/z\274\371k\002=\374\302\332\274\277\035\213\274\344\3606\274\0013[<\301G/\275\337]\027\2735,\\;\227a\303\274\355r\245;9\332\001\275\340\024\314\274\340\234\032\275\332\207k\274\364\274\302\273\323V\347\274m\364a\273tV\214\275\006R>\275\361r\002\2743I\211;\353/\016\274E\001};\232P\343\273A\313\234\275\353\250\026<\001\004\001\275\357\'\212<\334\016C<j\267\314\274Wi\321\274d\316\353\273\202\312\233\274\241#\254\274p\032\003\275\222y%\274\244\033\006==D\223\274\356I\241\274\340\203B\275C>\220\274)\315\253\274d\316\230;\023\263p;\257l6<8\223\331\273:\226\033\274<9\271\274\236\322\373\273\322\001\252<\321\262c\275\361\254\333\274\230\240\317\274\360\331\250\274\302\202\212\274\350f\345\274S\212\310:(U\221=tu\322\272\002\211\340\274\326\270T<\363cD\275\014%\241<Y\215s\274.b8\275\214\002i\273FS\000\275\022\254\246<\364w\317;Ca\302\273\225[\215<\202\024\317<%\\\210\274_\307C<\372\232\030<~:\033= \024:\275%\201{<\203\341p<8\026\205\271\\!G\274q\025\275\274zD\225\275e.\253\273Y\331\347; \266_<\rT\275;\373\217\357\274\037\270\307\274g\013,\275?\364\213<\342Hx\273\267\351\250:\354\274\035=\310\273\234<K\312\331:\212Y\034<\340\224\250<\177_\376\274\265H\000;\256\322=\274\300\002w\274\022\002\037\275]\366X\274\033\344\032<\310Q7;\027 \276\274\346\262\213<\304\372\036\273u\013\247\274\211\262\321\274\252`\220\275\337CC<\226\252i</e\334\274\374\225\303<}\314\020;\023z2\275\361\026\n\274p 
\004=>\320Z\2750\346~:c\027o<\252\035/=\373J\027\273\272*T<\240r&=\021}c\273\235<\013\275\376%O=\031W\242\274\360jZ<\321\212\275\273\234\213(\275\352\330g\274\023\247\022\275*}\273\272\207\214\337\273\335\323\212\274Q\302\274;e\341/=\032^\302\274\232V\005\275\370\317\313\2737\205\301\272:\222\305\275\342\301\271\273C\346\301\274u\301\253\274L\347\272\274\0170\342;\276\230\321<\322p\230;u\232\316\274qY\037=\r\370\316\273\261\333y\274\253ZW<\276\264\375<<U2=L\316\272\274\303\202\265<\017\343\251\274A\024\246<\353\236\022<%\t\251<\004\234\301<Ygw\274\377,P\274\341A\231<\t\026g=\331\031\321\273\243\244Q<!,\300\274\305\"\014\275\270J\234;\013\320\222<\"\241\231;\321\211\\\273,\330\031\275\344\221\322\274\212\215\252\274A(;\274x\344w\274\021\325k;\326\"\010\274\324\325F\274\305x\371<\t\2379\274\304<\323;\267\232\225<\010\226i\273\n\246\367;\277\350y\274w\257\350<\202t$<\333\000\006=y\340\200\274\022(\022\272\233dA<\362\023&;\365@\261;\214:\366\272k\327\221\273\315\"X<F\215\341:\304\207r<x\231\313\270\002L-=<\230_<\213\3669\275\220\256\377\273\017\027.<\330\330\377\274\306\020\340\273J\306\020=5\021\327=\304\020\335<g\372\331<\025\207\010;\244\327\302<\320\231\217\273\033\003G\275\322S\307;\220\306\215=h\227\347;[\3667\274\246\033j<\037\324+\275\n\263\006<7f\345\274\004$\343<\341\020\3759\376\350\305<r&o\274\223\007^\274\367\341Q=\002\262.=@&\301\274xc\023==\036\330;d\227\356\272]K7\274\372\020\327;\350y\266\273\002\370\030\275\226\306);\246\240\356<z8L\274\3313\037\275\242dm<\n\214\247<\247\226\320\274\375<u<ca\320:_h\'\275\014\366m\274\226\200Y=\210H\340\273#[0\275\232h\253<\003\327\220\274\004W\220\2747h\362\274\304\364S<\212,\267\272\324I\216;\272\351\235;`(E\274\260\322\216:\3749\002\274\323\327\321\273|\310\001<\033\223\030=}\315U=\374\022\370;\311\201j\271b\257k;x\346h\274#\035\"<\221\016\000\275\247\316\024\275\253\255\333\274\354\375B<E\301\'<\261c2=\005\031\257<\010@\025\274\217+\217<\232\232,;d\221\263\2736\254\022=,K\222\273M\002+<\213D\256\274\200T\360<+a\336\272\375\213-=\362{\007=\351\244G=\327=\263\275\336\"d;^M\323;l\031Y\274V%\302<rZ\201<\351N\217\275\243\245\364<2\340\030\274\357\311%<=X\337\273\244\246\332\274\257\337)\275\223M\371\274\034F\234<\300\275\032<\322\261\354\274\002>A\274s\246=\273\031#\204\274\013H\360\274b9\225\274$\254\374:h|Q\275\317!};\230\211n\2744\022m\274|\344<<\003\025J;\375u\327:\356\247E=\327o\254\274\021\tR\274\372\t\376\274\2151\240\273@\362\205\273\316\206\204\274f\036\342<\353\206\254\275x=\364\273\356\271\215\273\000\352<<\364l.;\035\340\200<\273\262-=\263t\321\274\3705\341</\245*<\233nG<~t&=i\245\231\273\036\344\347:\255\326\036<\202\263\307;\\\365\251\272*!\n<@\266Y\275A\253\023<\216S\316\274jT\t\275\032\217\025;\257o\320<Gh\212\275\377i\230=\306\355\000=\340<\033=)\376l\275\225\206\000\273B\245\342\274\'>\277\274;\317\"<k\275\204=x\240b<\322\n\307\274\323\030\332\273F\260\372<\210d\340<\346?\243<\004\003\204\273\030\357w\274\344\236\272<]\225\211\272e\343\205\272\3401\267<>\355g\274\271\001\243\274\001\tN\273\253H\242;L\307\200:E\325N\273\254\213\007\275\370\017.\275\322\343\017<,L\267\273\377I\252\274r\021\200\273yZ\262<\244\227\205<\336O\240<\224m\377\273CC\363\2748(\232\274\n\035==i\022W\271\362<\307\274\314PT<$\324\236<\304\375T\273\342u0<t\220.<\260\341f;(\277\373<\201\243\235<\301\014\206=sd3<,o\221<\264/\033\275\372o\302<\374%p\273mj\367:\364\237\t=\263\317\230;B\346\230<\004a\326\274\302\000\220\274\366\352\320\274&\226\214<\232v\250\274p\rd\275\346\'0=ub8\274j\\\260<\227n\303\2740\365\227<\315qw\274\205\332>\275wy\244<yNA=\246y\237;\207\3
20\324\274|\354W\274\365C\024;4U\251<ds\345<\335\374C\274\251\243E<\313\336\354\273FE+7}\'\n<\357\320\350\274D\355\273\274|\267!\2731\216\320<\n\314\302\274\266\267\323;\211\004\235\274 \354\262\274\331\311\032\274A\360X\274\016\302\230\274^n\017\273\263(\263\272,]\207\274\021p\013\274\244\007T\274Ll&\275\033\0272\275\201:C<\200\374q\273\223\201\330\274\346\026\036<]\376S\273\316\007c\274\250\t\222\274L$8=\322U|;#\3472<\234\241\337\274V\201\235\273\325\252\332\2724\310\312\272\331\355g\274\352\277+:\210(\254\274r\005\270\274\262,.\274_*\363<N\2416\275Wql;\025/\377;\025\373\033<\375\211@\273\030!{:\177\226\032\274\252\260\027\272_\363g<\231\353\230\274\021\262t\274\036r\217\274B\206\035\273\227\330\031=\342=\303<\257\205\211;\270\226-\275E,\210<>QF\274\3570\362<\322\365\014\275\237\005\026\274\277\246\355\274o\345\217:\226\033\271<\323\004\226<\307\021\267\272\307G\342\273#K\256<9\023\272<\251\330\035<\243}$=\2248\313<!F*;\207\253\353\272w\372\t\273\204\224\016\274uc\336;P^\311\273\304,\256<k\023\030<\027|\2519\023[\226=\246\310\035\270\277`\030\2729\226\370\2739\377\'<9\226\243<\007\326\007:\"\335\331\272\002\001\254<\342\315\215=\374\2402<\250d\004\274^\223\000\275/a\332\274D\363<\274\243x1\272\346\261\r=D\330\016\274\023\235g<\225\245\262:\264\023h\273\273b6=\350T\224:\3624[\274\t\310+<4\0074\274\332\262\371\273\006_\300<\356\200\210=M\240\350\273\310\232\214;!X\247;{\253W<wg\033< 2\262=\314p/\274/\2660<\341\305\216\274&\211\232\274j\303\233<G\220<\274E-\007\274\330\340m<\233\236\362\274\326\033\346\274Lb\221\273+\345\215\274\253N\021=i5\360<7\001\202:\255P\'=\351\373\226;\256)\r=-\223g\273\014\304\273\274{\233\020\273\345w\225\273\223*S\273\301\377Z\273y\331?<|L\007\271\357\251\251\274\330g\227<\014q\265\274\304\360\004=\242{]=\037\304\234;\262(\331<\326ev;\\\322\017\274\341\370\317<\2272\n=e\250L\2744\373\332\274\206\233c;OJ\252:!HH=\247#\243<\307iA\272\177#:=7\251\001\274\020\334\262\273n\323\234<\271\216\301<\313\247*\273\300\204@\275\215\006I;\364\277H\272\227\302\r\274\204O\272;\037$\245=\013i\267\274p\260\373;\313\036\216=0\301\221\274(\346\350\274\240\360@\275\004f\351<e,\037<n\242\200\270\032\317p<\362cB\274\312t\257;\336M\364\273\223\260e\275F\017\034\275\027\217\235<o(\031<\335\243>\274\306Z\204;\337\202\017\274s_\255<A)~;c!\231<\352r\264\274\024\201U<1\230P<\307\027\306<_R\351:uP\354\273\263n8\274l\377\303\273\276\0063\275\353\274\305<\203\\y<\354\236h<\370\222\344\273?84\275\227\377\270<\325\245\242<\375\200\n<\241\362\240\273\373\202q\275\202\302c<\031\"t\273\030>\314\274)\304\333\274u\204\"=\276\031\215<X\324\001\273\nu\272<;\027\203;r\272.<\224\023\332\274\314\330\275<\2748\347\273\274/\255\274\0048=\275mK\325;\355q\244\272\326\267J;\345c\300:\240\273r\274\246V\235\274\005D\203;\036,\212=\034H\002\272c\240\337\274.\035Z<qg4:\036\362\236\274dp\002<\327#B\274\214\346\345\274zS0\275\200)\215\274\357\213\374;\364#w:\312g\014\275\002}\252<\364*u<\244\020\274\274\234\245\005\274\315\367\226\274\202Dw\274+r\205<QI\335<)L\302:\211\007s=\303\221\263\274\245S\235\274I\034z\274v\274\337\272\342\311A\273s\032\203\273;ly;\334)\252\274\2640\336\274\342\251\244<\206\237\372;dSy:\324\264\237\274\200H\251=\343g\323;\n\361\353;)B\263\274\364I\214\275\243\':\271\022\022\336<Gm\276\274{\3779<\325~\203\274\327H\324\273\327\260C:\210e\222\274\374\257(=z\261\235\274Dex<\'\240g=\2744W\274\271c\233;\256d\177\274O\023]<\337\205d<\365\351\377<\341\204\375\273\010=}\272\275\021\035\274\322\323\374;g\362\001={\'1=Y/3\274\336p~;Q\324\254\274\232\310\213=\016\267w<\335I^=\275\224\210<\014\261\334\274]uJ
\274\254\220\350\274\313wv<\n?z<\272.\250<\323\262J\274n\324\252\274\244\215\323\273\226\036X\2757\016\027;fF\021\274-o\324:\021\304\316<\247\005\305<\t\235\333<\036y\t\274\267\243\t\272\246\312\026=q$\364\273\247J\275:r\264U\273\313]\031\273bQA\275\373^$<\031\030n<d\340\374\274\3365e\274\243\335Q\275\317w\252=&\250\220:vq~\274\250h?=&\316\023\274\226s\334<\304\020,<\200\3155\274\021\255f=\272V\306\274\354\377\234;\304\360\260\275P\211%;\022\367\220\274\214&>\275\307\026\314\274\027\325H<\205W\023<$0c<5\233\217<\354\033\217;\315\216d\275\177\246\331;g)\212<\277\275\210<\260\261\265\274\005\344p\274\307\347[=c\225\215=\274\242\264;\000c\274\274z\204\230\274?k\334<M\356\207\275;_,=IC\003\275\244R#:#\022\332<\370\003*\273\232\207\232<\323\266/<\004\322\023=+\350\n\275\300\227>\275s\204\001\275}m\254\274\207w\251<\261y\026\275\031\326\177\274o\237\001\275\342[/;\375p\324;\217cJ\274v)\226\274\225\214\227\273\025<\247\274\360\373\006=\351\033\201\274:g\230<b\312\032\274\361gJ<\251\240h\274\023\350N<3\301\357\273%MJ;[\3010\2728\313\027\2722g\307<\221\357\025<\032\366\276<\337\206#=\2703\260\271o\261z\274z\317\233\272\226\033\000<u\201$<\214p[=\314S\036\274.\3548=\350b\255\274H\014H\274\030H\002<\316\334\271\273\220\216\020\274\246\370\374<\374V\211=\364\243\004=}\345(=\370\005\t\275p&\230\275P\303\223\274;P7=e\342\t\274p\003l\275\220\227G<\017%\206=Z\366\214<\343\331\260;h\037-<\320\213z\274\326\034!;\025\324\245\274\343O\223\274\344\2704\273E\026\016\274\n[\024<p\233\200\274\365\332\322;\277OU=\340I\177\2744@3\275Lg7=\216wY:u\356S<\366r\023=\302\310|\274k\016\264\274B\255\024\274\";\376\272\376R\037:\027w\200:\300\013e;~\2303\274\214\325\207\274\017\344Z\273\322\005\266\273a\300\r\274\024/i=vs\021=\266L\215\274n/K\274!=E\274\355\273[<U\027C\275_\323\243<\005\rK\275\014\377\237\275\323z\200\274G\236_\273\347\214\230=\307\026\226\273\224\036p<\261%]=\333]\033\275\362\013.\274cSJ<\332wZ\273\001\'v\274D\266\230<\353\322\273<\3425\262\274\343D\327\274 \007}<\031\243y\275\246\356\267:-[\213<~o\361\273`_\317\274\345\001\353\274\027w\033\275s\344\217<s\237\203;\202\242\003=\275\323\302\274\267|\340:k\215!=\370\177\371\274\312\342q\274\333r\037<\022\364\360\274\274\221\204\2748\226e<!w\233\273\256\221\276\274j\211(\275\004\277\022\273_\026\317;\024v\030<\333\356\\\274\305r\001<\r\010\234\272\373.=\273j\326\231\270\214\272X<R/\262\274\353H\350\274\344Y\314\274\206!\n<\356\300\344;\'Vd=\217\026\014\275\204\275R\274\212\362T=\225\013\314\272\237\262S\274\n\341\256;\277\343\034\274\375\246\255<\004\013A\275r\214\213<2L\212<\357\260\220\274\343\306a\274\025\311\257\273A9\367\273\255\312w\2746\006\320\273(\214\201\273\214\344\252\273\017r\301<\323\014~\274\226\374i\272\016\316\332\274DI(=\342a\217<\010\242\334\274\301W\357\274S\324\025\275dm\000=\264%3<.\230\3179w\332\306\273S\024\312\274-\203\016\275B\026\324\273\256\315X\275\0253\004==\321C\275\350\310.\273<\2175\274ge\214<\205\313\233;\222K\336\274\242\005\007\274\035\300f\272\210\203\251<\356~\234<\241D`\274\373\013,\275\314\343r<\316U\247\274*|M;\261\313\215\274\036e\267<t\202\005;\214\336h\274Q^\210\274\312\365\236\274\324\372\233\274#\033n<\303<z<!\353\251\273yE\020<06D\274u\340\345;\244p\256\273g\344\271\275\222\204\254\274\260e\230\273 
A\301<f\2547;\263\207u<\222B\222;-\004\016\275\200\027\225;\266\243\277;\254\036<;#W\'\274x\341\032<#\233\374<P.\251<FD{\274w\256\306\273\035\025\354\273E%\031=\273\330@<2\253\370<\220t\215<\353\250\244<&8O\2752D\235=\211\031\224=\004\241o<o\344\232<\303^5=\003w\322\273\357Rg\274\344@\260<\247//=x\345)\275\020\314\355<\252\000\036=\342W\207\274\367a\225<e\013\361;]w\020\275\024Y\206<\177\033O;|\340\242\274\375\321D;\350\276\376\274:\021\007;\262\002\177\274\305\221\027=z\3132\274\035\264\036\275U\017\254<\220N8\275\351e\034<\213$\354;\300\261y\274\241r\313\274Bra=\261\234\332;\000\"4<bh\233;G\242I\2757y\367\273\221\211\255\273lro=$Y\245\274\204V\330\274\361/0\274\303\367\325\273\020\240\017<\315\231h\274\313I\177\274\256f\351\274\311\347\351\274\362r\203<&b?\274\316\364\013=\314a\030<\314\350\341<\277\242\356;\307\215\312\272\327\357\351;\277[\267;\245\313\221\273\266]C;\372\274\027\274v\323\234;_P\370\274=G.<\304\346\260\274\366\037\345;sR/\275\374@\302\273\261*\277\274.(M=\235_\214\274VQ\236\274\216\277\035<&\\9\275\375\212c=\033\200/<re\310\273\306\032x<R@\352<\\m\365<\212\212\305;\303\245E<\344\032\'\274\272/\331;\244\314.\274~\323\014\275\256\204\255\274?.};\351\2101\274\023\030\222<\000\035t\274e\214\001<c\265R<\347\306\000=\352\346d;\307\303\256\274\343u4=\344u\325;[M\353;57\350:3n.\275`\215\320< h\007=\0069x\272\340XN\275o\270\202\273vD\037\274\240*6\274\344\003\220;\"\240\016=\235\'\244<\230\010\272\274\312\245Y=\3102\225\273\367\352\010<\034\231>=\207\\:=\362\310\306\273\270QF=\303\037\210==\361p\274E\213j\274+{`\272\030\024\001<\020\350G\274\005\232;\274\033J\023=\330\352\336<\031\177\n\275\n\312\360;\314\334v<\016\305\224\274\367\303\030\274\335\351\031\275\255D\315\272\030\031[\275\016\306\003<\300\034\003\275\343\361\2459\366\356\235<HWR\274l\266\371<\\\245\037\274\265\373x<H\253.=\312\002\251;\247\317\007<v\262\205\272<\341\203\274t\362\230\274+\033\243;q\367!\275\236\000b\274\257\336!\275\272\006\024\274\204\335L\274#\327\224\274>\342\010\274\3730\250\273H\317\017;\217n\353<\344Fu<\276\034\244<%=\2259\254\2035\2744W\276;\367\303b<4\211a\271h\364\322<\\\031\262:\202\375f;\274\244\212\274d\265\375<\2745\363\273\357w\356<,\235\2639\315\372\300\274\362\373\320<\025\262\024;\212\275\342<w\010B\273N\003\257;\\\371E=7<\026=\305\344\247\2745 \234<V\310\005\274*:\223;\271\267E\275\005p\320<\302\2404\273T\270]<\265\036\206\273\024\266\n\275\"\000\215=\345\233\214=s\216\322\274A\216\005=\314\242\014\275\331OK\275+\275\034=\277.`\275$R\364<\037\027-=\004\322 \274!\2372\275w\334\002=\325\030\"=\021\226\232\274\331nO\2744\300\233\274\263j\243<6\364\325\274\245|[\274\017\217P\274\302?\223<\'\016\320<\306\326\215<v\340];_v\317;Q\t\025\273s\230!\274\240\371c\274\355I2\273N\305\000<\336S^<\333\010\003\275>3\244\273^a\310\272\372Z\377\274b\245\n= \254\023=\341\213H=H\325F\271\2449\374;\346h\200=\271\310\355\273M\225\000<\242\270u\274\237\325E=\212\367C\2753w\255<\374<\315\272j^\r\272\200\321 
=\325p\356<\0071\232<f\355\371\273=*\033\274\303\215\242:\312oz<\202A\001\273\225$H<\357\325\355;\223\333\016\272=\013\252\274\016\273.\273^\341h\2753\362\236;\327r\361;\326C\025\2751\202:\273\253[J\274\200[{<\177\006g:X\024)=\210\303\374;\216k\273<\rK\255\274\250\307(<\003X\255\273n\213\330\273\'\330\240\274\354\202C=\265\037\n<D-U\273N\224-\275\242o\310\272]\231\205\274U\227\331:$\312\262\2735T\n\272\366\335a\274\351\030g;\234%\203;xI\365\274\017$9\272\2619\"\274~\303&;+\223\232\274J]\201=\224\264u:Ha\241\274\362E%<\242ZH\275\326H\023<7\021\246\274\350C\301:\236\233\304\274\360\023\034=\\\024!=\2236Q=\222\305\312\273\032\260\373<\364\264\326\2740:\277\274\341\236\313\272\0371\n<!:b<E\331\340;\326,&\275\331\373\017;\026\231\362\271\347\313J\275\316K\352<K\217+\274\212o\256\274J\270\346;\241\202\333\274c\tD\272\227\177\n\275\346\331\010=\r_\227;rs\014<c\310\2579C]\037<o:\247;\353*\306\274b\340\217\274\255\021\302\274.\235\324\274;\272\037\273\021{\005\273\304}\254<\303\366\026<\302UY=+\020\276;\272Jo\274J\177Q\274?\370\331\274\301\222\030\275?\343\206\274F\025\034<\000\361\235<Dr\221\274+\027`<Z8\246<\013&%<\326^\033\274(\374+\274_\205f\272\352\215\312:\023y\242\275n\245z\274E`\200\274\340\352\211\274\344\246\215<q\243\223\274\035\232\204\274\246\\\210\274\360\025\307<\000\200\371\272\005#\n\274Q\006\316;\306\353\013<\322\t\251\272\',\002\275t\361\256\273\312Z\245\275\242\234\231;b\225\272;N)a<l\220u:\001\362\016\274\375\276s\274_\t\353;\207\210!:\210@\215=\266-\236\273\235R\311\273\243v\357\274\034\363\n\275IM\205<j\035@=\250\t4<[E\324<\327n\247<\016V\361\2747Y\027\272\362\347\001\275\230\371X<\264Y\007\274\270G\366<\356vx\275W\3034=_t\217=\343\214G\274\353\267\031=\024\036\302<j\223\336\274P\241A<\024\326K<1\016\006<y\316r=\023\'\274\274:\261\354<\3532\342<\251kM<7\277\374<\352\311\'=\375\025\217<\347W<\275\326\320\372\274?#x<~\274\322\274Gl\305<\323\353\366<\227?\266;yY1\274\2569\312;H\260\002\275\246d4=\223\315\276;\375j\320<\276k\324\274\351\233L\274\340\275.;\210\244A\275\355\007-\273\022x\000\273\344}!\275\316G!=OcP=\036\201\n<o\264~\274\315Y^\275\236\275!\275\315Q\014\274q\256q<\357\221S:\270\013\235\274\"\'U\273\317\001\245<\006\367\262;\206\013\351<\021\223\215=\t\207*\275\340\306\254<Y\301\241\274V\033\010\275w\005\025\275{\372c<\321\'\303\273\220\323\022;\021\226\260\274\000\t\017\273\260\2572\274sNV\275\2762\350\274R\215\t\274\203\325\025\274\332\213\003\274\321\315\234\274\026\374\260\274&\252\271<Nt\252< 
\341\335\274\'\224\034\274\333\315-\274\236S\004\273\013\3136\274;Z\342<\324\307\214\2736\317\235\274Xj\205<\\\'\342\274\335\007\260\274\370ON\275u\021\226;\325c\276\274\317\324f\274\2046\226<@\022\206\274\371\022\374;\006d\017\274|)\032\275us\261;\030p*=\204\243y=f\200C98\355\023\274\345\373\202\272\367\036\337;\240b\356<\346y\246;~\352\234\274\'X\221\275#5x\274\323\005\360:\366.3=S\252>;\253\374\t;K\033\024<F\202\027<j\037\205\273\227t\356<\370\241\376;\210\247Y\273E\377n=\301\231C\275a\276\352:\365\313\010\275\260\260\344<\277F\265\274n\325=\275\315\354\233;^\353\341\274$\263w\273\034T\330\273\343:E\275\204kI\275\350SF\274GK\027:\003\275\371<\317\"u\274D\216\027\275Ul\364\274\240\207/\274\315\271\330\274\340\010\\\275\226\366\261\273\354z5<t\365\031=\213\305\312;\223\371\224\274\275\227Z\275\004\266\372<\n\\\207\274d\n\226;z\006\016=W\037\010\274\2373\371\273\231\245\241\272#\\\264\273\324\242\202\274\202\301\200\274\244\305\234<=B\354\274X-,\275\355\206*=,\276W\274\200W\267<5%\010\275\020\270\t\274,YN\2739\334\017\275\261\352\204\273\222\001\007\274\306\233\255\275G@P<\316\321\317;\304e\277;\031\212\313\272\240$\321;\226w\341\274vK.\273/K\216\275C\273=;D(m8Bw\245=?<E\275\204m\371;\332f\245:\372RK<\264\030\250<\375\021~\275B7\t<\377\222V\275=^\225\273\232\310\022\275[\0363<\303\227(\275\226\263\235\274\213\310\030\275\260:p\275*i\010=\227\247\021=%\245\321\273\370\316\032<\220\357:;v\273\341;<\036\242\274=$\025\275\364}\005\275\373\252\260\274m\200;\274\344\226\333\274TM\214<\'@%\274\273`\253=\205_\344\274\320D\305\274c\324<;U{\274;mTm=\033t\275\274NY\260\274\236u%\274b\032\326<\232\202\246:0Y\201\273\2348\367;\027K\261\274$\254\261\274~\323== \350\227<8\034>\275\234\346u\274\371\204\013\273\243(\353\273\327@\331:\334|P=\001\264\324<P,\033=\t8\225\273Z7R<\363gp\274W(9=\252\343\n\274\036\024\355\273\362#\224\274\217L\266\2740\304\2459\263-\300<\nx\361\274G\232r\2710\200\220<\342\365\304\274\177\r\276;\303|\207<\347\357\311\273D\315\235=ZI\005=_\341\357<\231u0<uN \275\357\206\274\274\271\242\200\274\035\251\221=DK\021=\347\340\360;\340\0020=\327\240\303<\315\253\205\274\003\325\032<\343{\210\274\026F\3379Nfq\274\367/$\274\364s\352<\314\310\360\271\277\037\213\274\2151\321<\265\224\375<\026\001\200;X\t?\274t\032\322\272\325G0<\264J\307\273cki\275\037\247\002\2741\372\220:\020_\264\273\024G\023\274ZR\244<`\034\212\273|\362\211\272\361V\225<\244\210\341<\316\375b;\277q =\212\226\2069\016\250\263\274\351\034@\2748\337S;n\314\216;g-\250<\215XI<.Q\013=t3~:\372\366\224<\242#K\274\377\314\322<\203\303\200;\003\222\367\274?\233\202\274\025v\016\274\004\t\035\275\2270\"<\315\226T<\"\026v\274\347T\267=\342\344Q\272E\010\242\274\262\234\237\275\371\017=\275\226\335\201\274\224%\342<\032\271\026\274\257\221\342<x\220\021\275\305p\204<<N\367:0\"\210<\250\356\225;\376([\275\177_0\273Sv\030:\323\372\244;\366N\341;\247\0164<\316]\262<\014\0057\273Z9\237;D\371n\274V\245\210\273\241c\223\273Y\247\n=\254+\000\272I&Z<)-$<\324C\205;\327\016\010=\020\320?<\027\320\205;\306`9\274\037q\253<\221\'\207\275]\006\024\274/\360\230:\330\233?<\020Z\036=$\200\264\274\277\254c\272\005\230\353<\357\317\004<\010d\200:\013\031\364;(\330\240\274aL\226\274\'2\311<\316\243B\2745\201-=\210\353\216\274\n\322a<\2646\327\273\362\354\233\274\340V*:\272\373\235\274\n\375\330\274\037E\257<NuZ\274w\001=\275)\251M\2729\001\324<\333`\027\274\373\375M<\024h 
=V+\371\274b`A\274\033s\023<s$\376\274\301\305\245;\3046\262\274\341\224.\274\033k|=\362\266/\275\241\226\311\274\331\341\234<\211\305\r=\022\275\2128\017\200\374\273\273\307\313<[\266i<8\031W<\301\006\311\273\323\374\306\274\234\265\235\274.D\303\273\203\014e<\254fe=.!%\272!b\241\273\304\235\332\273\236ML<x\033\201\273_dp\274\234\347\207\274\376\313L<\354\030\006;\213\367\005\275y\'>\274\363h\344\274\325\201m\274\354Q/\272\235\023\323<\213\255\030;\026\201m\274\320\035}<\213\334\224\2737ym\274\030\331\016\273\234\037_<t\270[\274\263\213\333<\006\251\022<I\3305\274Q#F<g\354\3429\017\225\253\273$\315(\274\266\260\200\273r\327A\274rv\005;\026\221\233\273z|\231\2748\341S<\347\252t<q\372,;\220p\213\274\3140\325;g\217\003\273\217K\017\275\t\375P=\342V\311<H2\257;\311\214\221=$\031\365\273M\213\351\274\025m\202=\0251\274\274J\032M=\205lt\275S3\325<\301ve\272\362\260\305<\374O\360\271\355?0=\3047\232<E;\336\274t!\202\274\320\347K\274\364\370\312\274\024\354\243;\376/\255;\342\210F\2751j\200\274\322>a<\365\373\353;\206\303O\275S\363\357<\227\221L<9\335\271;x\310C\274\351~\256\275w \372;\334\327\0107\271\351p\274\332\016\247\273\243,X=v\351j;\2575\342:\214\277\000\273r\022\326\267\316@\257\274\001B\371;+\346\014\274\246\234\036\275\002$-\274\316\340\205<\016\264!\275|<\020;\214\324\236;\330\315\310\274\321\257\327<0\014)<\215\005\223\273\233\3626\2748><\274\300\236\233\274+#)\274\026(^;\021e\377<\023\0011\275\213\225G<\003T\261\274\300\303v\275J-\271<\363\014\030\275\370/\256<\243\305`=\250\351\314;t{\327\272M7<\275eZ\031\275\224G\270\274\334\303b=\233\276p<\"8\305\273E\227\030\274Q\024\257<Sx\273\275B\345\310<O\237\303<!\352\251\274\272:\306;8y\346\274\265\352Y;\307\2249=K\027\002\275$\3527\272r\3340\275o(\177<\033\n\325\274\344\214\236\272\331\273K\274\307[\324;\242G\034\274\\@\373\272\250D\r<\220\342d<\226\216\347;\010j\326\272\201\203\344\274\231\014H\274\t\223\361\274Cb\251<\350\262-\273$\037\241\273\237\005\020;\350l\200;$7\307=\302\024\317<=\374\031\271\345\332?\275\366N\272<M\214\335;ge\222<\206\374g=[&\001\275\031\336M==\231\314<p*W;tIk<(\274\017\274~D\217<\234\304<\275t80=\367\013\247;\271\354b=#\"\245\274do\217\274\260\331;=z\220\352\274`\243Y\275\274\354\314;V\004\246<\235\013\004=\203\275\231<8(\361<k\323\r\275\333l\370\274^\2637;3R\030\274\337\304\357\273\306\267\251;\346\274\021\274\200\254\005\275\220\225\217\274\"%\025<\361o\310\274a\017F\274_\327\005\273O\214\204;\005\244\311\274\243\023_;\016\206\3249\2666/\275u\345\323<\t\2461=9\207\303:\213\037e<\254p\271\274\211\354\346\274\330#\274\272\353\347\243\274\371\010\035\273\323\231\365\273Y\243j<3\311\377\274\213\304\227\275\025+\001\273\306,\270\272\221\025\362;\021&\013<\304}\031\274\351u1\274\356.\210<\315H\327\274\343\243d:Ph\032<\303\016a\274\200\005\215\273br\n<\303\217V<\205f\250\273U\277\234\274>\006\302\273\220\236\037=\2129\302\273~\345e\274Dd\002\275m\232<\275\014o\021\273(\304\224;\340\\\317\274\364\375\327<z\264P;u\n\266\274\243\332%\275\001\177\r\273\315\304|=#O\002\275GdG\275\246c[\274\355\341\254\274\377=\203\274(\245\035\274P\240\200\274YW`=\037\003\201\274~>\207\274)\357^\274\317\242\203\274|\243t<Wi\333\274\246E\030\275}37:k\260`\271w>\331;\312M\017<\206\020\254\274f\211\214\274\200X\374\273(\356\303\273\242\020\333;\316\010\260\273\314\252\242;y\303\205\273V\337\242\274=\273\306\272\220%)\272\3308\2318\352\243\346\274\212\341m\274\361,\252\273M\006\227\2732U\223=\207\353\314\273\355P\361\274N0\214\274\205\227\013\275\276MB;\360\253\216\274\266\036\361\273\235\276\377\273\036\346\013=bw@\274\340&*<\362d\020<\213\31
1\225\274\306\364\314<\037U\341\274\013\355\315\273{\247\034=\t\325#\274QB\356\273" + } + } + } +} +node { + name: "conv3/weights/read" + op: "Identity" + input: "conv3/weights" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@conv3/weights" + } + } + } +} +node { + name: "conv3/Conv2D" + op: "Conv2D" + input: "norm2" + input: "conv3/weights/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 1 + i: 1 + i: 1 + } + } + } + attr { + key: "use_cudnn_on_gpu" + value { + b: true + } + } +} +node { + name: "conv3/biases" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 64 + } + } + tensor_content: "\2576\366\272!\270\025\273\273\321\252\273}\355\205;\344h\307:\0055_\273\267\363\306\272\217\221\"\273o\005,\273\261e?;\360\340.:0\001\032;w\341\004<M\234\036\272\214\320\203\272.\260_\272|F\301:*\025\025\272;\262\207\273!\320\333\272\r\r\004;\331\250\377\272\333\356h;\235m-\273\334;%;\345\017\223\272\323\020R\273Z\327<\272\244\303\000\272\035\327\207\273\202\007;\272#j\261\271\376\261\210\272\307^\237:\267\355\377:\004\311\313\270\326\244\016\2725\217\000;\215\266\3319\022\"\0369\3242\273;\331\251A\272\323\302\204;\023w\254:\240\311\363\271\304\207l\273q\324\251\272o*\356\272\265\305\305\271\323\202\3119\265\244l;\323\367\333;>\004N\2732\333\226\273\336j\0229\250V\217\272\'\203\022\2735*\222:\242\\\267\272n\344J:\271C\035\272*\276g\272\322\300\313:\271\376\224\273" + } + } + } +} +node { + name: "conv3/biases/read" + op: "Identity" + input: "conv3/biases" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@conv3/biases" + } + } + } +} +node { + name: "conv3/BiasAdd" + op: "BiasAdd" + input: "conv3/Conv2D" + input: "conv3/biases/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "conv3/conv3" + op: "Relu" + input: "conv3/BiasAdd" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "pool3" + op: "AvgPool" + input: "conv3/conv3" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } + attr { + key: "ksize" + value { + list { + i: 1 + i: 3 + i: 3 + i: 1 + } + } + } + attr { + key: "padding" + value { + s: "SAME" + } + } + attr { + key: "strides" + value { + list { + i: 1 + i: 2 + i: 2 + i: 1 + } + } + } +} +node { + name: "ip1/Reshape/shape" + op: "Const" + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\377\377\377\377\000\004\000\000" + } + } + } +} +node { + name: "ip1/Reshape" + op: "Reshape" + input: "pool3" + input: "ip1/Reshape/shape" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "Tshape" + value { + type: DT_INT32 + } + } +} +node { + name: "ip1/weights" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 1024 + } + dim { + size: 10 + } + } + tensor_content: "r\331\361\272k\235\033:\301W\214\273\375\035\025<\225=#\273J\342P\273\346\260\207\272 
\227\224;\205u\370\270\233j\246\272\233\201k\273\"\366;9z\345\027\273|\250\267\273]e \271\310p\345;\340_\247\272\036\242\320;G}\243\271\2439\230\271\361\035\345\272\317\363\263\273\312\177\335\273\244\316\237;\2530S:IL\002<\300\rY\273x\320\r<f\010\275\273\257\340N:\000\335\320;@\\_;\373\230\010;\244\306\r\272\265\"X<C\311\247\273%q\013\273\353\177\361\273\204\311\201\273\002\231\302\273\324\235w9n\033\3778k]\205\273\212\025d:\2234\321\273+\374i;\374\'\024\273Z\227\277\273\\/^<\326\216T9\267\351\271:yu\n<u\016\266:\314\022\372\272\200\346\223\273$\037@8\273\264 \273\'\330.\273)\017i\272\"\251\223:\222*\2079\354\310g\272\250\0202\273g:\323;\344\3036:\246\360\262\273J\014\223;(`\002\273\013\365\014\273x+\254:\004Nq:\311\\\2408\202W\300\272w\315\007;,\324\220\272\356\027\355\271\302\026\354\271|\366\221:\375s\3068N\\R\272kd\020\273+We:\014\222K\272\007\273\227\272a\225&:\303\2401<Z\354G:\347s\216\273\377\3261\273\026(\373\272\242$\216\273\006f\301\272\330\326\261;\305\271\354\272(\223\2639\355)\254<?\203\271\272\356\273\031\274\026b\315\273\345\240\t\273E\000H:W~\013\273\351U{<\276T\376\272V\247 \273:Y\320\2735k\344\272R\276\224\272%Q\207\273\337\341w;\030\027\352<\375\004\014\274\342\001\231\273+\3608\274n\272$\274\247\210\224\273\344N\327\273\342\354\2728\2369\\<\227\372h;o\177_\272\232n\2409\303[\332: LL\273<\334\334:\370\334[;R\307\003\2739\235w\273)\273h\272\2028n;Gm[\271\257\331\202\271<\207\2369\246Y/\271\363\364\232\271\361,\213\271\215\017\3509\3034996/y8\353\017X9~\304R\272\306V\013\273p\221x8\317Y\204\274zx\250;\356\213~;\252aC<\371\024\031:\246\373\257\272+\256\230\272x\271K\272f\247\005:\376 \341;\273#\246\272\020\356\307:\302\262\213\272\274\317\026\273\022\004\'\273\251\002\3249\211\353\260\272^\024\3079\010\236\324\272&\271\036;\232\275&<\321LK\273\311\030s\273nw];\377\305\255\273\252\256\230\273b\224\005;4\177\300\2718Q\2459q\205\272\272\037\007\205\273\276N \273\373\244\315;\033/\257:\254\353\375:R*N\271\217\336\263\272W\357-<\325\225\020\273\205\023\342\273o,\270\273\375\016/:\362\261#:\314\352\033\273\267t\027;\303\003\263;E\351(\273\'\220\261\272\014#i:N;\001\272L3\205;\261\036<;\326\220\365\272\257\0166\273\222\0050\273\214\362\3339\240\234v:\347\036\200\271GcH\272zU+:\234\210\037\273-\010m\2710(\203:\337\025\260\271|\376B:N%\002\273\274tk;\010\014\032;\t\311\224;`r\240\271\360G\243\273\352\362\232\273\000\356\r;%\302V<$\016\262\272/\243\337\273\246\341\201\273\335\206\275\272\224\341\2049 \037\2169H\021\r;\225\tp;=,\314\271\020\220\346:MZ)\273\321\"\301\272Z\216\020\273\"N\225\272\201\246\360\272\324\026\217:\320n\225\273e\251\315:\022\274!;?\261\263;<\357\027\272\220\320\213\272Y\205\273\272-\214\211\272\237\251F\272W\225\325\272\231\022\235;\340\003\247;\003\016\362\271\355\341\022\273\305\037,\273uX\315\272\313\356\3479\250s\001<\266r\342\272^\213\216:\341\326l:k`\205\273\366\334\035\273\335\r\032\273\307_%\273\3237^\273D\231\327;)\312\206;[T\201\273\330\261\334:z\200e\273=%\227\273\247}@;\367\251\303\272\201H\271\273\210\022u\271\372o/<\326w);py\034:u\025i9\247\336%\273\262\311<\272:\202\276\272\356\262\277\2726\350\233:\246\024\367:\245\322\243\271\013\347W\273\301\252\364\272O#\206\2731]\023<\260\243\245\272#j\367;\373?x\2730\223\266\271\244\310\254\272\024\366\025\272 
kj\273;Nm\273-\357\300\273\347\304\251;\025\352f\274#k\206<>\025G;\374\\\346:\330\303\273\273x\320\325;\343\002\221;_g\215\272\300S\355:\233\ra:{\347\244\2736_\204\273<\356\255;7\331\':\032\363`\272\371G\005\2731)3\272\2469\322\271\3755\014\272\\8\3518\362\224\353\271-\274\334:\201F\241\272\314\222\266\271\225\324 :wT\242:\263\264\3209\234\0179\272\270`\352:\200\301\232\272y\330\036\273[_\3669H\017F:\242g)9w\337):\270\003\2738(\336\036\273\376\234\005\273\267\316\213:\026\240\t\274\340\231M<\371)\365:\312\'\027<\373\240\337\273V\377\215\273\013\0345\272pA\202;T/\037\272\265\213\211\2729`&\272\203\3435\272\377\326n\272\354yd:\206\276%:\202>k\2729\2443\272\250\027\304;]\023y;\317n\352;\016X\236\270:\302(:,)\017\273\254\017U\273\271\246\020\273\364!\274\273Y\035\207\273\233vN;#\0068\273\350\214F\273I\311\3549=a\336\272\242\200\204\272\352\314\033:\021V\220\272\374\372\334:\177\256v;U\367\3157#6*\272\203\004\";F\317\226:Y\003\311;\036\241\033:R\271\205\273\360mk\273\244\326\341\272D\355\254\2710\376Q\272\\\303@\271\006\304\274\272\204\324\222\272\3257k;+\374\2079:2\027:\222\341\230:\201\203\320\272_\327\356\271IR\235\272\343\33579\272\"\216\272)\032\241:\206\371H;\\-:\272\035_\3278j\234%\272\270\2569\272=mz\271$]\331\272\002\227\3439\007\251\361\273\333\246\'\273\274\320\271<\004\002e:5$\200\273ex\007;\246\300\345\273k@h\273\350\025?\273\377(\'\273\335\302\3149\312\202\235\272o\337\274\273u\367\372\272\002\200j:\265\214C\273\353\202[<V\203-;\230G\033\271_3\3779L\323Y\2714M\3269;\376\0039\214\306,\271\252{u\270\245\0352\272c(\3729R\231{\271PK+;\256$\3379\253\3178\272T\233\265\272\261i\3719\351\226X:\211t\027:\231\262\205\272 ws\2677#\353\272\004\020\205:K\342\013\273\312\225\231<\302\351\234\273\300=]\273^eo;\306\346\320\273\331?y;\334\210\364\273\340\010J\27329\2449\352\247\3209\312\277\311\273\031v\024;\311\246/\273\376M\2079\346\2110;\255\350\225;\2524\2029\346\356\372\272\252\261\203\273 \342U:\250\334|<\032:W\274\320\351\324\271/\273\006<\253\375\331\273\354\316Q\273\212\245?;\230\352\2327+V\257\272=\2323\273\316\311\320:\367\031\226\274)W\322\273\027u\362<V6B\2720\235\314;\234\242\273\273\353\261\014\2736\347M;r\222\333\272,p\\;\243\022\\\273\360{\000;\000\013\251\265\257\270/\273\005\013\000:i\346\347\271\211\276P\272\207r<\273\241\330\246:\261\261\\;\205\207\335\272\267\2464;\322\323l;\034\026*\272DT\025\273\241\n\r\273\264\373\262\2729\333\277\271s|\037: {\0169\2505\266\272\r\366\264;\311J\'\273\360\245A:b5\326\272\306w\255\272W\331\2109\023\020\231\271\227\001\245\271\350\033\211\272\256z\2079{\205\232:\t\214v\271\375\214\2239\002\220\326\270\023Q\023:rT\230\271\254\233\017\2731\351-\273\016\201\365\270O\031R;X\323\027\273ny\313:u\354\300\272)l\017;/\365\370\271\356V\025;f\360/:I\270\264:\266\001\004;d\223Y;\323\274L\273\241\211O;\276\021\315\272FY\277\273\177\317P\273\267PO;\2014\322\272X\310\3339tXb\273Nfn8\2023\363\271\031\321B;x\223(\273\240\262\014:\342mN\273\022\325\355;-\234w\272\277\220\307:L\315\026\273V\215Y;N\027\213\273\307\211}\273Tg(\273\302+s\273OY\236:V\337=<\026a\032\273@\3628;\\\323\203\273\311I\205<Z\370;\274?\206/<rM=:\016\201\263\273\217.\246\2724U\302\273\212\021\243:\022\261\200\272\321\232.\2737)\361\272$#:;\2645\206\272Z_\r9\342\367\3209\274#R;\220C\254\272\325\363\251;\254\203o\273\221\361K\274 
E\204\267\227\304j\273\262Xz;\022\244\371\272\273\254\217<[\302\264\273\362\035\027:\241\215\017;2f\267\271\363xM9k\376\330:32G9W\010c\272\311\0027\273\035\312\376\271s;\3029\356\201\342\270\302\304F9\225{Y\273\324\307`\273\315\310\365;\201\245D\273:\275\373:\003PV:\253\0051\2737~`:\354\274\230:\205\0343\273\326$\217:\226\235\013\273\350iR<\351\331\215\272\271\251Y\273\323\035\036\273\014\276\2649`\270M\272C\352\355\272|r\302<\241B\305\272|J\354;c\021\255\273^\366\230\273U\210E\272\253\262\212\273[\013}\273\021w\227\273\251[\305\273\2024c;\037t\343:\226\243?\272\334\022\331\272\357\364\200\272\310\236+:nuH\272\034-\3109\234\210\367\272\277\327`\271/\252\254\273\301\204\224\273J\216\206\273Hb\005<[I\264\2737\017n\273\364\327$<9\212M\271\261P\304\272*\032\326;\"\262\201\273\214O\327\271\006z|\271\222\212\220\273\265,\343:\314K\255;\002<\310\271\335\200E;\010\325-\272Yz\3747\322!\344\273\333\037\371\2738\007\215\274SG\200<.\232\t\274\014\334\205;t\006\347\273gC\341<H\312:\274{\223:<X\337d<\235\024\275\272\375\332<\274\324\345Y\273l.\343<bB\2309li\023\274[e\341\273H\221W\273\277\r\326\273\250\224\305\271r\241w:tA\325\273\370\274\023\273\376\375(\274_\207Q;\225h\021\273\"\335\362\273J\002\345<\314tE\273\272\210\3549G\360\032<`?s;\000\333g\272\216\200\256\273\300\316\014\272\177\361\177\273Oz\263\273t\240b8\211\365 ;\014\355\351\273\260l)\273S\355\330;\275\301K<\307\371i\273\233\342\273\273\002\233P;\374\326\244\271m\177i\273\320\215.:/\334\2609i\336j;\324\255\347\272\270\360\221;M\035\225\272\026{\204\273\035\224U\272A\256\247:\302\250>\271e\016\340\272\370~\213\273X\277\356\271\226u\n:\202\353\305:\256\006\372:\313\345`<\210\320l\272s\037\212\273p\244\177\273\" \201\273\217\373V\272m\036V:\242w\231:\340\371\221;\016\246\247;4\326\271\2711\315\234\273\236\032v\273\334\324:\273\207\000\204:\376\240\215:\216\236\234\273Y=\245<\376\343\\\273\035\252D\273\022\243\275:\360\2074\273[*7\273\024\267\335\2738{I:EU\253;\310\254\347\272\360\327\200\273\207u_\273[\232\244\272&\325\3738\023\250H\273 \255\344:\330\301\n\271\256\336\323;\000\3010;\014\353*\272\261.\016:\317!\021\274l\353F;\221\270+;\032\315\013:\217\231\226\273\006%\244:\244~`;\322l!;\335\377\016\272a\363.\272l|\263\267\270\266\272\272\212\272/\272\264\324\013:\235p\0259\300(\0139\263\213p8\304\245A\272\217\223b\273\004\014\2349H\365q\274\272u;;YnK:*\367\251<\301\347m\272|\022S\273S[\320\272\317G\3537\272F?;JL=;\224I\025<\204S\033;\"qz\273-\320\266\273\003\330\367\272\235e_\273?\200+\273\267\206\357\271\024\232\305\2722\276\211;\354\023:9_\202\300\272\311\325\237:W\335E\271\370\365$:ug\035\273l\033X\271~\365\364\272\216\365\3569\t\321\305:N\371\014\274\260Q\242\273\365\373\020;\335\232;<\246O\232;W\301K\273\267\022\354\272\t\003_<\364\350\336\271\317\314\001\274{\014\254\272\2412H:\325\036J\273cB\225\273\315X ;Q@\351:\n\257\252\2727R\2539\337\377V:q\327z:+\"d:\253ht;<\306\222\273\"\333\231\272\313N\031\272\223l\3449\303Dl\272&\376\223:\375\033\031\273\037\000\242:\234(\323\272\315\024\331\272\002M3;||\n\273z\003-\272\3365\020\272Y\271z;2a\177;J\243\350;\262/\346\271*\217\363\273\214b\204\273\215\023s<8\332j<r\t\"\274\232\326\t\274`\r#\274b\201\307\272o\002\313\272/\345\221;}\300>\273\266\257\257;\323h\0149\000\031\010\271oo\207\272\251\007\027\272\267\362\021\273_\326d\271\370\204H\273\352\204\264:\234\210\003\271\324uY\273\322\3209\273\351\006\254;e\237\323;\363\265\320\272)\305\375\272\373w\340\273L^ 
[raw octal-escaped tensor_content bytes from the serialized test model data added by this commit; omitted here as not human-readable]
\365:\227\004\217\271Q\220\240\272QH\345\272\256\246m\271~\374\301\271\270[2:^u\311\272\2420T\273\206\360\3659\210SI:\347\324\r\274\\\222\213:\034\332:<\233w\r\2722\366\331\271[\241%\273\266s\3328cC\232:\205\316<\273>_\375;\306|Q\272\301E\001\271Qy\257\273\225.~:<-\337:\005M\307\273\354\031C\272\370\224=9\315K\360;E\333\223;a\327\346\273)\257\223\272\230\351O;E\035\035\270B\\+\271\036K<\274\361X\322<\370\325%\271\343\312\270\273\307\026E\273s\343u\272\0045\226\273\001\324\020\273\240\001\022<\203\360\326\2732{\310\2737#\207\273\nK\323\273\347\367\200<\372\270\3559\365w\333;p\217\"\273]T*\272\330\033\241\271LK5\273\344V\350\272\240\253\237\272\344T\205\273\021,\r<\032\367\234\272\243\0269;\200\336\212;5\337\310\272\343!X\273\315\313)\273E\236\017;G\204H;\221\326\211\273G\3077\273\327\320\017\273\355&\330\271Q\207n:\001~\024;\035\305\216\2721j\025;\261\257\026\273\242\232\275\272`\377\207;\361\333E\273Vp\277\272Q\022~:\022\367\350:B\351e:i\203\3308\277\236\2669\340\301\031<9\016\262\273\307\351\'9\356z\013\273\333T\350\273\355\365d\272\302n\037\273crh<\t\226\r\273k\314l\273|o\025\272K\247Q\273\221\352\216\271Nn\227:\305\324\300\272\210S\270:A\272\2239\334\025\304\272\277\313\231:\277\027D;@C\003;%i`\272\341\212\033;8\351\331;\310W\210\273\364V\2079\354\247\030;\303Q\276\273\301w\004\273\3248I\272y\335\014<\253U\275\272Lz\200\273\206\234`\273\364\320\2249<xU9\232\364\033\272\340\273\200\272\242a\037:>\\1:\'\253k\272\021\312\203\273\255P\014<\266\257(;\303\226\305\272_C\246\273^\356\002\272\242\004\340\2725*\207;\272o\313\272C0V;WYm;\241\323k\273\333^\352\273\262\336>;\353\234e\272\373h\240\272@h\r\273I\3166;\271^\036;8#\357\272NRt\273A\371\255;\t\276s\273|\036\257:d\177{:\252$\337\273B\357K<\\\301O\273c\310Q\272\001z\231\272fD\004:\005\347\245\273n\315\024<\034:\213;l\264l:\347|\010\273\020yr\273S\236\037\273\004G\226\271\216\013)\273\240\032P\273~p*\272\364\000\007<\\\016\200;\267n \273[W\311;a\023\274\273\325\305\237\272\210\224\"\273\034\260&\272j\203\346\272\361\331\321:\027\206\010;\342\245\206;Y\370l\273\223\317q\273\276$\277\271\235v\254\2711j*;c\027\004\273\017/\005\272\235Z.:\304\315\377\272\020\236\221\273\316\346\237\2721Q,\273\037\307\3369\317\312L<W@W\272\263\330n\273\355G\207:vs\302\273\216?\342\272Xv\013<h\240\031;\213\317N\273b~\332;A\300N;\351\225\355\273\261\272\022\273\236}\353\271\031T9\273Z\"&\273\347\201\031\273\325\365&\272+\002B:ML\323\272\354\354M<i\267.\272\371\020\220\271g\230c\272\310\327\3349[\003\350\272\206\236\001\273#\244\2709)k\032\273QX\030<-\032\266\272F \274\272#,\037\274^\244Y<\315~\205\2724\240\231\2705@\316\273\"7\006\273;]\004\273\254\364\321\273\215\373\000\273\2344\205<\320\305\3467D\231y\272[*\005<Ck\242\273\374!\364\270\212b\034;ymb\2731z\375\272\177^\257\271B5\236:\233\264\217\272Z\023N8\315OR\273\005\000&;\231Kw:\024\311\3769M-j\272\006\336=;\033\237\223\271\"2\273\272\036C\\;t\014\333:f\263z\272\336\235q\273\t\315\306\273\260\320\344\273L\354H:\300\337A<\313\350=\273V\353A;\327_\020\274\362j\';\217\023\301\272\350\236\245;\t\2279:\014\355\340\273\237\361`<\225\"\037\273\344b\\\273]Nj:\311\350\263\272\350\027\037\272\200^+5\346\036\362:\373|\265;A\341\035\273\245\26559\232o1:\310k\270\272\354\371\037\273m\300\300\273\360\217N\273dO\254:\336mT\273\213\337\003<\207\346\215;R\366\313;=\036y\273\364\367.\271\020\323p\273\351\326\307:\226Q\213\271\242C 
:1t\242\271\371\304\026:(\313H\272y!)\273\273%(;\227\331\006\273\346L8:\372\345\327\272\215\322\202<ds\005:\234\222/\273.n\352\270\232\372\257\273\022\253\"\273\014j\230\273u\242o\272\272\210\264:\307\374\017\273Ji\205\2736\r!;a\356\031<\'\305?9\213$\307\273kv\032;\244\032]\273\303\256\030;Pj\203\272^\343r\273\220\"\226\273\013Ry\272\320^k;\327\263\202\272\017\001\206\273\2440\210\272\2746\027\273j\336\210\272\027\316w<Ps\2729\200\314\217\271N\207\223\272\2555\252:\025\212\335:=\363\262\273\270\374\032\273\346\304\003\273\034{\326;\270\201\260:\3761\020;G\310\020\271\357\205X\272t\277\232\272\256b\210:\377\263O\271\334\3364\272\221Q\363\271\255\324e:\177\266\"\272\2647\272;|\351\002\272\253\307\2029\221\242\266;7\272&\2739.\314\273f\270\201:\275\276\017\273\"\tL\273(\257\010;w\014J\270\330\333o\273\007\272-<i\304\302;\352Y\001\270\335\267\334\273\335=\324:Cf\371\272\372=\226\2732\317\235\272TG\\\271\213a\247\271\342\242[:\344\237q\273F\334(\272\245\t\036;\206y\3368\240\235\020:]\205b:<\026\2738\340\266\037;6h|\272\365\"\34794\363\342;\203a\205\273\315\314\014\271\327\250\265:\215\230\r\273\264\217\314\272\325\313\026\273ub$\273#\331\030;j~K\2728\210\274:1\2765\272\316\323@:\212@4:f\222\246\272\230\247\216:\247v\207\272\363P\330;\304\273\330;\207\356g\273\277\035\365;\033\254\364\273J\237{\272\227KJ:\255\254(\272-\343\027\274\003\366\32790\214\275\273\257M\357:\342\346=\2728\'\343\273\014b`;\260;\221\271+t\252;\214g8\273\324x\014<\te#\273;r\315:\376\3336\273\000C\016\273\246S\2719h!8;\2142\201\272j\244=:\023\307\3159\345\315\336\272{\360\353:qN@\273D\304@:\245a\263;O\202\025;R\243\302\273AD\004\273\260\252(\273\202\304\036<z\300\262\272i\244[\273\260WD\273X$\1779\347\326[\271\rK4;\364\260&\273\362\000\217:GA2;\003%\260\272Ss\"\272A\331m:\341d\003:\214K\322\271c\226\005;*\343&;k=\031:3+>\273N\350\014\272\262h\023\273\226\227]9\003\275Y9\236\376\220;\020\221\2678\343\032\2519B\260\253\272\242n\0279SI\244\273\004\252:\272\336S\261:\205\370\343:\307\311\207\272\210\026\344\272\251\2623;\020\337U<\205\302`\271\354\323\271\273\211\r\352\273v\007\230;\250v\263\272A\360\375\272\016\260\032\273\362\273H\273p{H<;\347D;`\341%\274\321\211j\273N\024}\273[\256I;\004\340]\273\276\376!<f_\211\273T\0267\2736OI\2728\265\022;\002\001\023<;\203\252\272\332_\364\273h\020\375:7\\\r\273\201w\274\272\360\3015;|>\221:\007\346p\272\235\313\337\272\213\335\244\272\220V\353\270\364Q`:\351\203\257:>\202\002;Z\261T\271\026\210\217\272\272\242\236\271\223C?\270\277.Y;\273\007\323\272!s\214\273u\211\211;\':\211:O5\221\272\2705.\272\242\354\r\272\342v\036<L\251:\272e\336\031;!D\227\272F\242~\273\263\006C\272\272\\\247\272\313\227\365\272\227\240\000\272B\316\366\272j\021\\\272f\252\3339\025\210$:\'\000&:\024\225\031\271\207L\031\273\333\345\000\272:\371\233:?$k\272\230\261\361:.&\321:\370\346\331:\371\301+\273\307c\212;i\027\225\270\360\313\2279\002S\001\273]W:\272d&\010\273jm\246\271\216\257\2226\243\366\\\271v@\207\271E\031\r\272\002\376g\272\217\307\340:~H+\272\317\026+9\223D\300\270\002\314E:\025\347b\273\353_\317;\265&\356;X\303%\272\371h\345\272\036\030\214\273\037\221(\273){\006\273\357d\252:\257m\037\2711\036\007\273\217I\230\273\'Q@<\251?\213\273\302N\240\273\262T\335:m\323Y;rt\302:2\256\361\272\203]\r\272.1`:\273\265\241\272\326y\314\271\214\240\335\272\205I\356\272\317p\0279\260\303*\273\343\341;;8dr\273\335\020\370;y\230\354\272\365\321\337\270\232\307#:)\273\034;\261[\370\271*\027\0359\337\204:\272\013\373$\272\014\035z:W|\316\271\266\276\247;\356k\256\272\034J\351;s\201\374;\267\"\221\273M\022\214\274\331\371\
202;l)\351:K\030\371\272\325\013|\272_J\303;\231\360\374\271\210\350\302:A]>\273\360\3415\271\307\261z\272\351I\013:\001\017\0379\037\334j\273Z6\365\267\001f@\273\336\226\274;\353i\235\272\301*\362\273>\214\3069m\206C;\364C\227\272\010c\224\271r6\304\273~!!<\322sJ\272\241s\300\272*\374\2519\257~\3769\273C@;\244S@9$\336\223:\366~\246\272\363\313\"\271\302\010\266\272\204\351\245\2722\374\202\272T\317,\273\262N\214:\277\343g\272\177\305\036\273l\201\373\2726o\333;$\260\014;6\265`9\377\345\'\272\"\327\320\272\332Bu;\304\035\3279\204~\016:Z\014$\273\240.\346:\256e\261\272\310;\324\270`\273\210\271zd\n\274\\\23179[\007;\272\356\213\316;\244_\321;\345W\337\273yoJ\273\371\327\004<8\267\267\271\233\215\314\272~7O\273Mb><\274\351\330:^\376t\271)\243\221\273B\220\006\272\337\260\264\272\013A(\273\377H\236;uB\276\273%\033\326\273\354\2673\273\314n\221\273\\b\035<\260G\010\273\244\177\177<\354\r\227\273\325#4\273{\317\021\271\377/\372\272\343[]\2735o\r\273\266c0\274\312A\236<u\007w\273\234\217\355;\356\321\302;\3717\227\273Xm\271\273,%\016\273\243\361\243;\310\210r\272\231\032\274\273\361Y\"\272y\374\2149\376\337\234\273\254\206,:\3127\204\273\203\336\005;\230F\005<\341\330\034\273\344\314*\273r\031\034<\270I\3118\347P\035\273\223;W;GFP;\322\036@\273\237W^\273\313\304\032\273\271W2<\r5\n\274\342S\274\273\231m2;\003A.\274\366n\305\272T\t\343\273s\224\263<.\036}\273\264\360\317:M\301\000\273n\343x\273\206\n\307\272]-\017\274~\372\212\272\207\325\001<\367Lg;\006\271\275\272\246\013\306\272\376\246\014<S=a: M\031\273\375\355\t;\362q\020<\341\230\367\273\265\371\007<\352\251\0148\352\325\003\274\340\322[\272\334\246\256\2724C <\306\236\217\271\214u\234\272\345n&\274\247\301Y9\360I\267:T\177|9\207\036c\273d\213z;\275\303\274\271\324\372n\273i^\215\272\262\2755<\3773>:\231\323>\273\353\333\323\273S\025D:\374\031m;3\200H:\264\215:\273\366yD;\257\tu;\341\226\271\273\0341\026\273\033y\023\273\336\350u\272\310B\021:\214\231\262\272Ziv;u\302\276:4)\n;b\2256\273\307\347\013;xOc\2720h\237\273?8\336:\035Z\235\273\214H\036<\004\027f\273\016k\241:R\221\013\273\275\031\247\272N\367\260\273\306\023\322\272p\322\225;\332\363\350;\303\026!;\204\255\267\272\256\375\360\272#\334\000\272\002\3775\272\202M\224\272z!L:U\353M\272\345\nP\2734\232\276:\215;\200\270\2355\310\272u\376\001;\361\364N;\350\3367:\347]/\273L\301\2209\034\247\213\270O1\203\272\254\364\032\272\013GH9\030\276u:\232\304\014:\362\267\335:\372\257\021;<}\007;4-c\271\334\341\256\271\355\354;\272\253[P\273\240\224\220\272%\216\256\272\240\260\037\272L\266Q;\003\020\213\272()\032:\264-\367\272\007\024\272:\350*\270;\200\357+;\246\023\301\273\375\273\2479p\205\032;D\010\206\273\340y\024\273\006\343r\272\370RG\273-\353\215\273\375\014;\272Mn9:hJ\361\270@/5\273AIA<v\260\330:\324\334\021\272\030qI9>\366\345\272\370v\3657\177\026\2229\352^D\273W\r?\273+Z\244;\363G\240:\325\202\302:\277\2732\274t5\311<\033\036\244\272\372\246:\2733,D\274\374\243D\273\004\221(\273`\364\006\274\316\n\204\273\273\370\245<\0355\252\273\312\37619\211\230|;\251\361\007\273\3163\252:\206\315\241:\262\203+\273\314g\010\273\336O*;M>8;\303\021\355\271\035C\307\271>F\234\271\036G\300:Z\025\006:Q\017\021\273/\350#\271\337\247N:\221\30289\300\010\022:\0069\260;\317\300D\273\307\006\014;\371\206,\273+\233\226\273\267\312\006:\231Q\t\273\346+\247;\234\037\17796K\211\272[\347\341:\206\200\262\271<\274\270:\315\365P;\235hr\273r\366\270:\000\n\3016\243b\235\272\276\375u\272\373s\314\272u\220\2748\223\257\202\272\212\003\t:\'A\";\247\255\220;\210E\370\272\003\206\205\272\272D-9\320H\211\272+\2641\2736\27
3\224\273\360\231\315\272G\025r:\350\365\305\271NKe;\2069<;\302_C;\332\007\217\271\314\377 \272\307F>\273\253Oz9\321\333\220\2714\246\267\270\244L\242:\370q&\2710T!\272\321<\324\271(,\036\272\031\266\2608\376Y\031:\256\321\026\273\264\021S;\232\242\305:\245y\252\271\263\263\323\272dg\351\272C&6\271\014\002\314\271\257\243\223\272\335\177C;\205\003@:w\254\316\272\224\357\333\272{u\255;u\350\211\271\235\0005\273\344\"\210:\360p\250\272\366\225\335:F\010\234\272\375\231g\271\223\216_\272\316D\233\272i\323\272;\301\231\211\272\000~\035\273\\g\034;d\364\005\273\266\323\216\271-4}\270\245p\330:|\246\262\271\346\355\020\273\314\016\025<\335\026\232\271\366\322\357\273Q\374@\273\340h0:\260\000\030:\3243\221:1\230\3709\351P\231\271\257E\230:RI\31394\244\232:\335\315G\273\021\001\234\272\201w\":\2128\020;\013\236\303\272F\320\272;\241x\366\272\003\226\016;\330\367\240;6V\351\272=\036\316\273\311\353\234\272\217\036M\272&\225\212\273e\345\\;\303Ke<\261\002\353\273\263l\315;\250\222\211;\255v\227\273g\002\003\274:B\355\272.\355z\273Hfn\270\321,\206: \206g\272\225{\267\272QS\240:l>\201\273\235\375\0269\265\'k:\325\332\300:\2766\252\272Z\377-;\277\316\224:\265T\331\272\203u\251:\302~\000;\311\257\231:[\222;\273\3463:9o\375\33294\217\337\272\376\305\217:\256\237\3418\211\326\323\272\327\267\244:\362x\016;r:\350:o\022}\272r\nx9\207t5\272\240k\303\272\030\310\251\272\206\320\":`\031\003<\211\252\021;\226\255v\273\214\361\214;\337}\345\272\327z\003\273\217\254\316\272\244\2400:sO\261\273\311\3724\272\n\371\2369X\271\327:x\264\003\272\206\252Z\271b5\345\272\007D\004;\275\235\260\272z`\0019\200J\022\271\265\375\026\271\265\014^; n\320\272$\315\253\273\324\005);}\213|\273\362\252\261;\356\022\3629\211,\023:\n\\\003\273,\030\2229`G\217\273\350\275:;Xf\327\270\313\001U:\030\237a\273\024\036\003\273\261\344\2429\206\334\263\272k\363\267;\374f\337:q\343M\273\364>\211:\323\000#:5\307\032:7\030\264:\021\220\211\2727\234\001\272O\231c7\377\245\323:\222\006\027\272Gh\031\272{5\353\271\214`;9\320`\322\2712\212\2339\014\211{9\337\247\r9\303\003\2748\021\307\0069P/\3009C\367Z\273\230\301i;7R\363:q~\324:7\304F:>\013[\272\252\233$\273\334u>:\025\314J\273\260R\244:I\246\010\272\224\305\314\271\374T\2158\257l\010<\311p\024\271\007\376\365\271G\020:\273;\032\r\273l\337\203\272z\350d\272\362\211\352<\210\335/;x\230 \274\300\207\033;\260\371\310\272\275\177f;\030r\307\272\272S\005\274L\351\t\274sT\377\273\246u\342\273\313a ;\257\037v\272\347\262B;gXx\272\014\243\323\273\224\2351;@i\220\272%Tw\273r\251D<V%\006\273\374\275\200\271\336$\363\271\"p\301\273\307\202\256:\331\342H;\301s\003;\221\233\344:\017\037\004\272\367G\205: 
\376`\272\334\035\3539\365n\207<\207!\324\273wE\323:!\371v\273\314Rn\272\213r\014;\271\361\317\273\315\374\033\273\344\245\301\272\302\035\206\273\010q\220;\234L\r;\304\003\263\273\335\023\221\273\214\334=\273\350\322p:?\021\000<\340qH;(xM\273\300\334?\273:\0305\273\024\336\036;f~\242;\026\307R;\311\324\031\273\003w\007;B>\371\272r@\3269\217\210\253;.wC:\330\343U\273\020r\220;D4d\273\377\3148\2720\346F;\254\200\315\272\233\206\001\273\341&\037\273\364\266\3149d\000n:H\334\357\271\302(\325\271?j\304\272GC\322\272j\354!;\345\'\023\272P\304\0137\314\310D:\331\355\220\271\\\267\215\272\263\250W:\000+\2358\001\363\323\270\257j\371\271\026k2\272\237A\3239\372,%9&\245\227:\204,!\272\320\277$\272n{J;\000r\311;\n`\375\271/\002\010\273o\014\210\273\010\320M\270\225\346\331\270\234-\224\272\326\004\t<@\020\303\267\356\317\255:\006\005\3548\360\314\014\267\\\345\"\272\332\002r\273\307\"L\273\261\315\212\273\222c\370:\310t\213\274\273\260\335<3\014\032\273\341\353\245\273\326\226\r\274\035g\021\274\276H!\273\007\023\336\273Rn\2249\221[\303<h12\272)S}\273m<\017<\365{d;#|\001\271\024E\330\273\305~\256:\021\261\312;\353\215\340\273\311\343\324\272\2650y\272\256\242\001\272\3265L<\033\031\303\273\217\317\036\273\234\255\205;3\314\235:\306\306\213\272\230,P\273\021Rq\273L\230,<F\225\001<\226\205\376\273\005\353\205\272;C\332\273\"\004\212\273M~\360\273\356\375\325\272X\272&\274\265\327\246<\202\326\2448\026\372R\270^D\r:\2234p\270\352\315\2339\017o\237\271\203]\217\271\n(\317\270\0135\006\270\007%\317\270\350\310\356\271\337\"\t\273\r\306&\272\005\321\216;\347P\030\273s\220\241\273\000\322\317:\017\317\";\251v\270:b\'\036:\356U&\273\034\301\253\272\027\020\021\273\265\021\024\273\212\030\320;\235\032\034\273\"\241\025;\203\330\202\2715\n\330\272l\000\202;3\n\313\271w\352?\272\336\3408\273e\335\345:\220\317\3007\305D\007\273\374B\213:\357\242\016;\363\031\327:\000\001+\272\"\347\300:\022;\240;W<\330:\337\371<\273\336G\356\272\036WK\272&\275\237\273\3378\243;x\252\271\272\026m\240\272\'\030\221;\232\224=\272K\025\270\271\243h\260\2738\263[\273f&\201;\026j\2239\234\340?8\231\256m:\253lY9nK\020:+\252\022:\227\322Y\273=N\203\2725%,:\034\223\211:\213wU;\n\327\373\272?{\361\272\375\032\004;\361\374\007\273&\030\253\271\357\341\235\272q\325\254;\201x\177\273~\343\271\273\331\001r:\326%/<\255\324\032\273V\273\260\272\370\020r\273\374cq\273.<:\272\376z\245\273`u?;\3407\3039\325\241\303:\215J\341\272t\221\r<XX\273:n\241[;>\222b\273\215\014\201\270H\271\024:\372\\|:Heh\272\037\325\355\272U%\231\272\024R3:7\353\356:\026L\353;\310\224\233\273\337\337e\274/\034\223;\211\2239\273\014\263\001=\212\254\224\273P\2175\274\346\311]9{\005\317\273O\370\236;\350\221\020\273\210\221\351\273\226X\035\273\003ob\272~\342u\271\237\321#<\274\306\277\272y\372:\2712\345\304\271\317\370\256:>d\2208\334zY:6\327]\272\320\034\024\271\245ZB\272`\013\201\272B\344\2419\237\210x:\347\243K\272\010\204\355:\300Cp:2\325\t\273\326\245\271;j\251\222\272xb\363\272kCI\273Fu\340\267]\335\t:!\332B\272\200\372\253;\273\251\227\272\357|\2119\027\002L:J7g\273k\036\227\272\214{J\272\300\200\277\272\326\325\013;\277\002\270\271\232\025\001;=\\\325:\010\261\304\273\330\t\246\273\311o\032<\205\364\223;\326\013\345\2730p\004<C\204\006\273\204\215\264\273%\202\324\272?\250\335;~i]\2731\332\216\273\374=\231\273f\220\365;{B\355:\351p\211\273\337\256D\272\375\325>;i\251\201;\036\006\356\270r\'\242\272\366\210\367\272\206u5\273\035\212S;\352W\3009N\016\255\271,:\356\271\267\003G\272m\303\210\272W\335#;\205\206\201:b\212\311\272\232\024\016\2738\274f\272s\255\3439*\036\
237:\361~\303:\212\364\202\272\260X\206\272\355\307p\271d\372?\273]\033\257;\327x\241\273L\004\345\272\317T\010;&V\322\270e\272\002:\211\377F;\227_\26099\247-\273\222m\216\273\037\2510\273\300\331\235;G\3144\272\244p\3269}\020\343;\"V\'\273Z1\3509\300\327\311;\306\264\033:\006\343\177\273eY\244\271\006\371\266\273f@-\272\200\326\213\271\314\t\274\273/ZS<\330\306P\273H\202\335:/\302H:\032\307\002;\323D)\273\"/@\273JGv\273\266\243\3229\3430\314\272\255A4;\303\204S;J%A\273\207\002P\271\273\377v\271\024Dv;@F@\273#\246\375\272{\306\250\271\016f\031\273w\270\324\272o\321\017<\337\361\341;\2443\032\273\\\331\306;\251(\252\271\200\377~\267\370\221*\2734F\370\271k\010\022\273$\361\025:\363{\265\273\334\n\032:\331\216a\271%\375\257:\034\312\231:\323\321N:0\341:\273&a\243\272JlN\272\2445h9)O\201:\267\273\017<z\331\242\273H\310\005\273\346\347\237\273\237\003\310;\037\253\342\273\341n\213\273\312\213Y<.(\305\273\'\250r::I\\\272\3704\3106\356S\263\272\022=v\273\340\201Z7|\247\261;\316H\024;]\300\340\272\350\t\0009<\330\027\271Y\267\2067\025\312,;D\342\207:\275p\211\272\007C\275\272)\372,\272\362\250\3718\220c\3759E`\243\272e\306\3408|?e\272D\337\340\270\210 \236;\000\222\375\272\216\376\026<\365\032\272\2736\354#:\013\370\247\272\235\361*\273\363\362\016\273\223\307\3059\230\341\312\271\237\346\026:\343=\307:\316\240\225\272+/)\270>\010J\2703\207\3758.\371\177\270\261\021p\272\"\007\376\271\350\237\206\272\241\336<;\304\244*\273\017B);\027\2407\271\331=,\272\363B\213\271\212\025V\273]\234E;\000B\322\273\333\212\013\273\267\037\021;o\034\353\272,/\2339\344\344\310:\323\220\340\272\024\336\310:\363h4<\302\222\224\2733\316\006;|l5\270}\017F;B\263\210;\220^\234\273\003\255#\273\374\226[9}/\353\272\266\024\026:\003\255r\272O&\303\272\216y\251\271u\370\236;\351\027\326\2720D\275\272\266\273\364:\260\325\336\271E\004V\2722v\3059\266\305|\272{W=<\356\371\215\272\345\257\357\272h\232\016\272\252\252\3009\345\206\370\271rS\242:\010W\322\271?q\262\2739\343]\273\364\212\3729\250\207_\272&\002\221\272\020[\244\272\311\201\004;\355H6:\262iM:\214\277\345:\200 \014\273\220t\307\271H\017N\272\374C\010\272\244\312\353\272\343s\225:\026\356\036\273FVG;\254\371<8?\"\374:\034<p\271\034J\315\271\357\375::\240\031\213\271\273\332\245\271\021T1\272#3\375\271#\275\213:\223C\322\2710\246E9\261L\2218,\362\3338(b\376\272V\367G\272\370t@\273D\234\236\273ph\007;\223\013\214;\347\2579;\2436\330:[\036\352\272\210A\275:L\246\2319\276\223\317\270\360\353\363\265\264\315\2159\323\243\216\271\034\250\024\270\275\301\200\270H\233\0217\216\335\337\270o*\2166\007\022R;b\350)<\342\303\355\273\024%\016\274\010#\344\272a1\034<C\245\354\272`\206H\271\214v\261\273Z|\001;\252\367\247:\330s\375\271\200t\020\270-K\t;\343d\2249l\022\255\272\333Q\351\271\250\035\276\272L\352P:`n=\272\202\347\2139\337\301\034\272&XN\273^\247\315;a\317\242\272\364\261R:\377\306n:=:\375\272k\t\224\272\025|Y\271F\024\355\273\257dV<\370\367\222\271*\371#\272\001\216{\273\200\330H:\265\035\310\271\202J)\273+\257\212;5\305M\273\302?\255\272=\3550:\263}c:|\321\331:\037\234\024\273\2036\247\272v\244,\272c\301 
:\005\302i::pU:\364\230\3319\224>\023\272\2154\227\272\321\2479:21\270:K\36219\234\026\256\271\373\233b\272\n\321\033:\314\252\307\271\033\215\334:L*4;\375\300\275\272U\206F\271(\337\317\272\"\230\303;\334G\206\273o\265\3379>D\235\272t-\027\273\354\334\236\272\324\253\2159\347\352V;9Se<\363\327\267\272m\267)\2734j\201\273\274\035e\273\027\016\001\273=\333?\273>\273\013=w\254\341\272\326G\007\274n\310N\273q\315)\273W\272\321;\216\214\333\273\203\362\037\274\244\222\222\273\271\224|\273!\251\213\272\255T\000\273Z%\225\273\200\217\304;\240=Z\270[\242\005;\r\350\353\272\t\006w\273+\322\006\273\332s\354;\027\004P\273\374sS\271\322\205v;6\223:\273\346_\024\273ia\";*\0047:\234*#8\253(\2479\357(\234:b\235D:\005\260\221:\257\224+<,\0277;\253v\242:\034\327C\274\3564\214\273t%&;\334H@\273x(\2129W<0\273gt\323\273\013\211t:\333E3:`u\346\273\302,\330\273`\341\335\272\213\323\0219]n\231<\010k\202;.7\367\273\206\3006\273\227S\371\272\230\025\006\273v\313_<H\323\3109\022\225\325\272*\371&;)xk\272\240Ue9\014rY<d|x\272\352E,\274b\242\200;\331b\267\273x\303\373:n\'1<\006Y\265\273\200]T\273,@\206\273\342\307H:\334\277\3419\261;\r\273\\x\002;\205\023\310\271\313N\362\272\364\247\334:\211,\n\2724\213\'\271\2550S9E\341\373\270\003Yy\272J\367\217;\321\255>\273%\315\3329\372\314\2309\320\235\324\272\364\243 \273x\356\027:\201x\033;S\005\014\273\003\241\024\272\231\r\243;\360LR;\334M\022\273Z#2:?\346!\273\346+\210:=\243\362\272@N)\272\223R\014<\023\215e\273\213\255C:M\r\200\272\016\324~;kD\005\274]\254\360\272\340\203 \273\021\032\247\273p*\020<\201\037?\274\350}\261<\214:i\273\010f\231\273\301\207\007\274\3325@\274d\330+\273\314}%\274p6Z;\204d\342<C\366\221;\337k\234\273\214z\212<\243\243\035<\336\207\235\273\336O\203\274\032\321}\273\345\345\203;\221{\263\2734\';\270\204$\361\273\356I1\273\023\207\376<\301E\r\272\010\020\240\272\2268\362;\351\265\031\273)BS\274.w\366\273k\020~\273j\236\221<L\215\0069~)\373\273b\260\3049U\333\202\273\376\214S\273\330\001\006\274\305s\261\273\256Vp\274\271\275\313<\246{#8ZF%\267\213I\32486\034!\267\300tg8\277\347\313\266\366\371@\271\341\016a\267\211y\0378\265\357.\267w\221o\273Ro\225:\363#W:LQ\217;\205\276%\273~\337\276\273\366\326\2129n\331\\;\033k\335\272\004mt;\261K\336\273\tcP\273\220\334\2767\305\333\2128\"o\"<\010\361o\273\r\365@;\326\240i\273FK\2739\247\234~;0\r\372\271,\363\313\272rv\364\270\002\2153;]Fc\271\310\206\241\272a\371\026:3\207\217:\342W\2328\362+h\272\231+\361\2738}\035<<\332\037:MR\014:8\220\354\272s\245\223:H\023\270\273\245\217k;\341U\374\271\260\037a\271v\322\207;\277\022q\272(>\017\273\216>-;\267\246E\273\000\374s\272O(r\272\343\302\026;#r\356\271\252\225,\272\225\323\3649\330\336\2115A|\301\272\331C\360\272\271\230c\272\375b?:UC\013;\372I\253:\332_\006\271~\004\252\271\306\363\216\272`1\023;Rp4\273\312\t:;\264\342\367\272\371\004\237\2722a\230\271Lc\214:\\]\2619\344FC:\371\013F\2732\230\027:\357\003\211\273\302\352\002\273\227\020\004;\037t>\272b\\\302:\004\346\306\272\257\036\333;\345\2612:\352\037\010;\267\337\005\273]\207\221:<\247\3768C\266\315\271\305?\217\272\266\354\034\271\214\253\34594\236{9\024\030\242\271>HI9\204\025\t\273\2667\202\274\333\017\250<}\276`\273r@Q<\256\305\335\273\246\023\261\272A4A:\253]\231\273\314\337;;E\014\334\272r\350\361:9~\t\274\244\212\335\272#\334K9\353\306H<O\327\327\272\2408\350\272=\341\001\273Y\303\t;\373,\311:\343\033\010\272|\375\013\271!s\337\272\302u(\272\336f\2069\277t\210\272\'\200\233:/\035\210\272\363e\211;\273\013\315:\026.\223\273\352#\213;8\203\271\272\303\234\237\273\355\306+:\253\376\031\272\225\371\264:\3
25\3040\2725\244\335:\355?\254\272i\2227;+K\212\273\356\206\3028\301\017\372\271\025\r\200\271\242.\200:\250\003\2259\036K\3269\004\250L\273\3055Y\273\033f%\274%\016 \274m\246\335\271D;><]\001\236\273\270!\322<R&\255\273\255@\342\271\004x\344\273\247 ;<o\243\251\273\006\366\302\272lz\202\273\240\007\354;\244\235\306;\3508\022\274\376J\\\272\022\3200;\214\024o:\305\267\2209\247\314\362\271\242V\2219h\340\2659I\340\3129\357qI:\342d\025\273\337\223\370\271P\253\2029\217 q\273\335\221l;\312\010\266\271\\O::Q\307\223\273\244\036\2669B\033\270:P;\013;\224z\371:\007\377\314\272\214\371\325:^\277\n:\334\212\200\273\251\337<<\274t\270\273\237\265\344\273H\310\0039\r\313\240\272\003,x;\274\300#9\017\327[\272\307g\014\273\271\372\010\274\0214\223\272q\305\007<\236\266\027\273\372M\3079\234\334p<\362\025\231\273~V\200\273\255#^\273W\014\303:\214\271J\273KI#\273/Wo\273{\376\177\273\310\373\001\2720bo\273\274\333\324<k\203\336\273*\333O\272)\004_\273\341\352\210;\316\330v\272Q\236\220\273\325\016m;\233b\217\273\333\254e;\'\352\234;\307\262\r\273\003\321|\273\2507\361\272%\340&\273\242>\223:\216\215\005\273\354HT\273\264\347\3119\300h\017\273b\332\017\273DQ\206<\303`\251<\203\355t\273H\340m\272\260p\202\273\024\000\306\271\345\366D\273|]G;\t\324C\273*\346%\273\333\325\312\273vi\336\272T\333\000\272\272O\3409\375t\324\272\205~\034:\014\371v\270s\261\003\272\313\352\251:D\240\242:\303\374S:\002\365\014;J+\314;\312\310\204\273d\201\";7\020\203;\300\273\325\273\"\333(\273\207\223\237;\200\310\305\273\334}\016\272\023\250\2269\022\257Y9\307\227/\273.\307\r:l\235\361\2728i\222:\202,\037;\302wZ:zA\027\272wO\245\271\227N\010:\240\250\n;\322\177\3509\034\206\023\271\363\213\244\2722\266\205\272\226\322\032:\216\233`:\301\332\251\272\260\203U\272\361\220L\273\014\n\311\272T\350\253;d\331Y:\316:r<GO\327\273\332\" ;\252\000&\274e\"\253\271>s\324\272\017\223\201;\334\221\013\2723JL\271/\032\214\272~\377\023:\027\341y\271\312&\2548\240\244\r\273\035\273m\272\271\201\003:x\032)\271\036\341\227;\311\001-\272\374\376\261;,~?;\241\366\332\2732;\017\272h\022\003\273\324 \036\274\273\362\333;\016\363/\274]\300\272\273\375\315\t<\316 \365\273*\253\027\2737o\263;\362x\241:\311\303\225;rlO<Di\310\273\320\262U\271.7\n\273N\025e9\307\021\030;\341\203\332\272\206\337\377\267\256\326H9Jk\032\272<\016\246\272q\205M;\311\317\3519\010\232W8\223\370x;9\3131\272\312(\261\272\327\244b\272\224\315\3777A\307\334\271\006\246\317:H\032*\273M\370\016<o-\322\272\225+p\273.\314\247\272)\032\271:\3008\230;\313\317A;\262!\244\273Z\327$\273flp\273\272`\346:\355\357\304\271\251\211k\272\312\225\2657\303\323\261\270\322e\3017\321\335\3778\024!\260\270\253E9\272WCl9j\203\242\271\371\232r\271:\260U:w\355\246;\353\261\013\273@\351\251\271[;\037\273\001\262Q\272Ou\234:Z\302e\272\270\233>9\032W\206\270 \364\3219g\330)\272\326$\275\272\350/\365:v\262A\272\373{\3229/\352\2039\262Q^\271X\360\022:\2074\005\273\227\305I\272o\321&\273!<\260:\332\005\273;\275\213#8jb):\313\311\205\272\263\340\374\2729\341V9\335\306\0168R +\271C\n\2659\225\036\341\270|\306$\271\0064#\270\205UL\270T\213\326\270\262\332\0138\362\347\021;Pc\303<\217\326\023\274\357\204\014\274d\3134\274\225\210[<\324\337\3159\336\3403;t\005K\274\026!\320\272<\366l\2726\310\203:\333\223\272\272 \007\033;~\022H\272@\257]52\200\223:\r\352\257\272<\2147:^lH\272\250b\301:|\263$\272\002\231K\273\347y\004:NZ\236\272v\265J\272)\240\243:\311.\317: K\262\271\002e\243:\324w\006<\217\032\200;\274V\211:\304\302b\273\254\r[\273\340\232\275\272\371\200\003\273\276\257\306\272 
\260O8\322\017\275\272/\325\'\272-JD\2718\332\3459\241\2413;\357e\317\272B\324\206:\355\024\217\271\322\357(\2723\'\342\271Z\265\353\271\t\372v9\300f\020\272\300\221\2168\337\206\3649t1B9\200\305F:\346\251\007\272\031\313\212\272t\2773:B}\216\271\036^\000;\244\247n\2711\026\271\272J\304\331\272b\224E\270)\020r;\233=\252\272\245\343\0059\224\211j:\023\342\004\273\330\214\032\273y\022\247:\221\3145;~\312\t;\010|\025\273\213\257\362\272\205\220\014;\356\237\2308\3277\005:(c\034\273U.\355;\010\372\334\272\260\237\321\272?\374\364\2721\227\220\272\035\224\021;\004\313\255\273\023\014\216\273nr\272;\344\273C:b\204\026\273v\364c\273\302\345\233\273\2741%<H\\\306\272\"#\375\272\034R\323\272\275\364\033:?\252.\273\237I\370;sT\277\272\002m\230:\027\304\304:\3127\237:\214\356\271\272\013r\376\271J\245\251\271b[\2049\2276\31294\211Y\272\037\276\'\272b\263y:\343\355\363:\260k\261;\271\177\332\272e\222N\2731^\252\272\347\205\316:@p\363\272)\252\233\272D\311\205;\200^\212\272\032\277\016;\034\t\312:\005Y\221\273\014\002\315\273\nz\007\2720\037\246:M\367@;\3315\2039j\007#\274\235\312\302:Z\263\235\271L \260\271L\017><\254\351\302\272\255\243|\273\001\254\027<\340\321g\273\002\220<\273\374g>;\017=\307\272\256-W\273x\035/;<\340\023\273\021z\\:,\023\275;$I\223\273\240\271\263\272\306DA:\202\326\327:<\247L\271[\226\2029\340*\210\272\244l\315\270\313,\2649)\354\21391\372M\272\355\363;\272\025-\2549\360\324x\273\273\016\227;\354\024\323;\352\230\243\273t\022<\272\316\312e\272\376\005p:\246\253`\273t\374I;\r7\242\272)\366\301:T!\025\273\226K\027<\010\356\3469?\307\340\272\203\267\234\272\202\271B\2734\311Z\272h\305\223\271.\204\366\272\2421\201<X\277\246\273\335A\243\273>\306 \273\341\341i;\231\330\323\273\374k\305\272\014^u\272\335&:\273\225\237\241;- \273.\266\2529\005\023\203\272\213\010\014\272\321\'\225\273\306\3433\273\204\002\016\273}\231R\272\344P>;\031x4<\352I\207;\255q\242\273)\370\225<\230\222\2629\316j\037\272uDl\274\030l~\272\323E8;\351\303t\273\325\371j\272\2111\205\272\241\356\203\2735 T<\240p\300:\266w\330\272\354I\037;\313~\334\272G\031\352\273E\337^\273\010\000\010;3\251\216;y\021\240\273\336=\037\273Lf\n\273\202\216\020\273\264m[8\014\264\252\273E\355\240\272\257(\227\273\255\270\225<9\242\2559\315Y\300\267\024\032\324\270\357\232\211\270\210,\n\271El\231\271-ez8b\026;9j\036\0238\002\360\"70\024\3509r\274\252;b\200\205\273f\221\271;\030\351B:\206\221\350\273B\035F:J\241\016:6\214\256\273\r\264M;/r[\273Mz\317\272\364\023\360:\277\347o:\000.\334:\261W\017;\336Ni:\203\331\204\273Q\034^;\240\000\372\272\007\313*\273\t\350\264\271<h\214\272\2028o;\027\227;;\250$i\273\026\35039%\2446:\257\315\272:i\272\240\272\030j\300\273!\211\345;\022\341\322:a\267\'\272\270\265\"\273V\030-\272\010?\203\273\214[ \273ph%;\013\326\242;\354F>\273\274\322P\273K)\177\2739\234W<\231I%\273}\270\005<?\n\033\273Q\310i\273Vn\203\272\026y\366\272\305l\277\271u\212\026:\317\002-\273\347\251Y\273\350\273\2359Jd\210;V,0:\345\277\272:\314\037\014\272\375[\217\271\033\370R\273\360\037\250;\260T\330\272\201k\300:y\210\234\272J\344\360\2712\344\2149]$\301\2727\214A;\355\021\357\272\2427\026;\235\024\274:\'\"\004\273\255uR:\027\334\201\272\342H\264\272\305o\262\2714\240\214\272\233\220\262:\252\236\312\270%\301\247;_\311\034\273\'\021\031\273\356\013\241:CM\271\272\370\260\325\272O7b\273\340\377\201;\245P\227\27251\007;\211\340\231\273\2577\237\273\nz\302\272\217\373\214<\364\027}\272P\036\261\272\257\001)\274W\230\027;~_a;\376\3239:\372\024l:X\316\240\270\235{q;l\263\365\272J\326\223\273 
/\333;p\320Y;\226\216\251\273\023\r\316\272\033\376\265\272\364Ft:n\030\2728:`\003\271hR\220:1\222p\272\236qC\271\220\261\232\267b\017\210\272\007\216r:\365\201G\272c\372\240\271j\367\007\272\013\036\344;\257N~;\026\366\202\272vs\016\274\243\320*;\321K\262\272\341\277\037;\245G\203\273\320\260C;\276\367\234\272\213\'\3109/\277\300\271hh\3729\301\251\n\273k\2411\272\364\263\252:\255\354)\272\326\273\023\271\3057=;\350\262\267\272?\320\354\272\023\213\002\274P\205p;\222\321\303:bp&\273\004DP<\023\201d\273\263Vi\273\357\201\312\273\255\\\320;\260\305\217\273\316\313\215\271\2204\367\272\224k\";<\252K;\377\272:\273\260T\213\271e9\177;\265\037\210\271^\247\033:\352T\231:n\302&\272\374\010\302:\211^p\272kG\303:\301\353/\273\356]\300\271\024s\0109\n\346\036\272T\271\r:\nuI\273\252|\021\272\255\233\207:,J\225\271\023E4;\372\304\200\271\016\323\253\271\346\"I:6\210p:Lp\226\2714<\003;\232\353\014\273\026\311N\272\232A\305\272\276\255\024:A\246\021\273_\240X;_\216(99j\326\272\023\337\207:zd\247\273\265]\026\272\251\026\364;a\032U;M|\365\272\224k\215;S\\\377\272W\006\241\273\250\2378\273\302\262\305:\004\177\235\270\267i\247\273\'\031P\273Km\032\272\353\355\3139msy\272\206\214\210<\224\245\277\273H\252D;\365q\230\273\241\373\305\273\035\344&\273)\007\214\273\036\306\337\272i\256\300\272\"\2313<\255J\303;\364\216I:\225W\302\273^L\344;\234Z\223:u\033\340\273T\006\206\273\354\220\226:\204\333R\273\244\234\376\2725\332m\273\377l\206<\367\220\237;\262\203\251\272\315\303\200:%\347\205\273\0178\257:J\024\277:X|\346\272\203Q\023\273\2454\320\271\257\370\224:?\271\334:\225\257\3629\352\2273\273\020\251\036\267h\254V\271\313\237\314\272@\026\354:\374\252\271:[Zt\272*\226\2568\304\334\225;Q\261\014;\037\345\212\273\372 \247;\244B\346\272\303b\340\273\216\210\205\272\265<%;h\035\213\273s\306v;\\$\226\272\212\014\232:\327(\265\272\030\2117:\375\000%\273s\273\267\270[M&;\256\367\257:\2729\006\272\022\275\347\270T<\242:\277\023P9!\013\273:\204\007\000:\327\330\301\272\\\0317\273\016E\304:D\373\314:4Gs\272\306\336\236\272a\361\345\273nr\006\273\r\327\346:a;\323\2734\324+<\261\322\250;|0\033<_\314\022\274\273oc\272\222\224\310\272\355x\364:E\331c:\366\366\n:\022!\022\273\235\2143;\371\232M\271\224q\327\271>8\315\272\325\325f\273\204\273\373:\304\203C\273t\301~;:\324\027:;#\242;\354\254~:~\344\256\273\252\221\220\272a\032\223\273J\301\021\273\274q\274;\214\357\221\273\n+\365\273^o\035;\344\355Y<\334V<\273\rY\322\273kQ\035;\307\202\033\2724B\335;\323\335F\273NT\255:0=\341\2720\013\335\271\"\326\343:\004d/:\216\327*:oA\245\272\271\326\253\272\270\304\270\271<\0023:(\032\005;\335\243J:\366E\356:\246\017u\272F\037\014;\336\375Z\273\245y\022\272F\365\3439\374\177\314:\357q\200\273\031g\231;L\203-:.\316\223\273\224\0174\273\016~\327:\316\307H;\032\241\3719\265\264\213\272\232\034\367\267ru\016\273\200I9\271\301\"(:\002\316\036:\032.\201:bn[9\256\266\261\272+\245\3249\2535\213\272\322\003\360\272\276\333\316:\304|\2259\256\212\211\271\310\263\031;\026C\023;&\215\t\273\027\241Y\271\236#\203\271\332\333W\272Z\260\365\271\326\361H\272\205\330Q\271q\260\310\271\037u\212\271\233[\325\272r\305\315\272\361qB;\2467\244\271\266\310-:7\354X:\312\256\320\270J1\2239P\2574\272\202\211\313\272\020O\352\272\r\340\307:~[\t;,\021\370:\357\240b\272\344\217v\267\223\rh\272aA^\270\343>\3617\000\34587\010\210\3057\322\302\3477 
\232\360\267S\006\230\266\332qF7]\234\003\2677\221\022\267\223\3669<\024q\037<\377\366L\274\2430,\273\271\300M\274\343tB<W\223M;\010p\200\272\255\203S\274C(\264;\347Q!9\037\343\201:\030\335\020\2733\342G\273\250R\032;6\262\253:\274\265\344:\261\316\250\272\022\372|7P\325\261\267g\034\027;1]\253\272>>\016\273\374&\024\267\372\311&:]\320\353\271\235\303\205:\004\025\353:\201\370\200\272\311\017[\272\010)y;\271Yf:\r\277\313::P\014:\023T!\273f\366o\273*\230l\272.\265\014;+\354\314\272c\r\254\271\270\352P\272\372\276\2528\235\322!:e`1;c]\221\272\307\233\010;O\366H\271\223\306\267\272\372\336\025\272\n\346\271\272\370\226\331\271)?r\271=\3466:j\001\0169\337\277\0259t\361Z\271I\026\202\271\336G\3719\306\351\252\270\003\365\213\271z\321\";\347?\213\271\020\236\211\271i\027\025:\004\213\323\271\225\\\312\271\r\327\272\272\270\036\31294\031\2159e\350|\272l\344\316\272\335\345];H\177_:\374=\327\272N\266v\273\315\314\025\273\244y\246;\t\237\361:\332\203e\271\3707\333\272\307\2543\271p\014\204\272\214\277\022\271.\033}\273)\312M9\306\230\357:\n}\324:\330e)\273\204+\331:\266p!;\311\373(\273!\375~\272\265^%\271\210\255\020<\325\2176\272\276\003\365\273\254\320\275\271\004K\254\272\324\\M9\016\210\224;\202 \227\272\355\346\221:\177\271\367\272:\010\022\273/L\301\270\205\317:\272Gq\n;\240\251Y;nmP\272m\003\2529a\236\364\271B\025+:>j\r;\033T\234\272\257\033\254\273,H\262;b\022\200:\221\014\254\271\317Y\216\272v\225l\272\303X\202<\231\344<:\261\314B\271\006S\010\273E\202\255\273\374\005\r\273\357!\360\272\377xH:Fr\333\272\213\344\210\273f\303\3777\243\237\021\273\254\330\026:\020\253\202;.\222\266:\272\303\026\273(\267K\273\362;i;\350\360&\273\231\352%:\276\321-\272C\340c:kF\340\272\223Dn;n[^\272\312\333\355:w\371\034\272\302\260\352\272v\034\307\272\304\243T:}A\220\270\206s3\271\016\335\2159v\203\322\271\327\032\202\271\226\030X:\233\260\037\272\007\245\237\267 r09\260|\2059\276\004\276\273l\220\006<\315\214\334;]\214\342\273,\356\035\273e\245Q\273:\014R;\004\301\"\273\366\335\245;\315\254\035\273\261V.;N\230\036\273\373\2729<#\375\320\273\035\240\027\273\340\273\034\273+}?\273\333`,;\2214\265:\017\244\317\272\234)\355;\033`,\273x\202\353\273\226\013\3708N\306~\272m\371\320\271\346\311\'\273\321V\006;\026\337@\273\"\272\356;\036\202\026\273\376\004L\272^]\213:\255\035\260;Z2\363\272\365\01079?\305\377\272r\204\364:`\225\300\271\016\026\241\272\214N{;\341\366I\273\2715\r<\003`\352:\036\t\000\2736+\000\274VJ\277:\347\312\350:,|`\273\360m\234\272J\362\025<\\qw\272\313$C;d@\302\273\276\260R;D\nb:\2232B:\332\223\267\273w\003r\273=\226L\272z\371\025\273\014*\241:.|\353\272\"^\221\273\2112|\272\227\014\242:X+D\273\211\201P;$\036\340\272x\347\013<R\346\2119\207\300\033\267\002O6\270D$\335\270\356C\323\267\377\303\220\271\234\356\2579~\025\310\270\222\205(\270\365\317j\266ex]\273\t5\216;h\214\034\273r}{;\253u0:\345\360y\273\274<\222\272Vp\021;!\355\205\273\020ys;\376\320\253;\355\356\201\273\001\251\3239aLx\273b|\2409\204\263l;zz\320:\204Y\320\272&\2532:-k#\273i\nL\273\220\224%\272\240\214}\273*\244\255;\321UG;\343\371\001\273\327_\005\272\022\177L9c\352\321:PQ\003\270\000&\363\266\242\202\354:\r\014\260\272@\355\031\266\346\202\014\273\321\376\341\272,\300\244\272\242Y\343\272@\225W;\276^L;\'\264\t\274\025\347@\273\312\275\206\273 \347o<j\377\025\273\273\315\220<\251\203\264\273,\245\266\273rp\217\272\263\315\034\273\030\027\264\272\035C\030;\221\014\270\273N\373\210\272\366\271\014\272\006d\004;\225\341 
;\030\367\202;\211\236\344\272y\267\372\271+\374\021\274\254u\n<\352\215\341\272\206\360\235\273\314;5\272\332\234;\272Px\t;\253\006\360\272\373v\300;\020\225\024;\356W\010;^\240l\2724#\014;\370K\223;\354\362\\\273`H\276\271\252\237\354\271\263\214\251\272\364\246\322\271\232\377\374\2721\346\330;(\225*\273\251L\004\274R\013\355;\365\367\247\272{\314\262\273\027`\363\273\266(\004<\260 \200\272\032\247\177;G\212\214\271\336\024\221\273\023\017\':cF\2219F\007\304\272\226v\013;\360SC\273\327\310P:7\037\221:\316v\212;^.\363\271u\352\255\272\235V@;8\311\273;\'X8\274\021\240><a\363\273:\251\342\006\274\250\313\2667\216R\374\271\323<\3439\244+[8l\355\302\271\037\026\354\270\271\202\3008.2\2669\314*V\271\336\317\3308HlM\271+\325\026\271h\220>\273E\232I;\222\267\234<n\255z;7\344\334\273P\336i\274jn\024;\266\221=;\261\240\271\272\361\260\277\273?\354K:\306Q\363\272\326\265\315:v\205 ;E\356\022\273\343V\\\272\306\240~\272\320\177\023\272\300\276\240:p\321\3539\266Q\317;;\321\212\272\321\021\230\273\354\346s\272v\301\207\273~| ;J\361\325\272I\356\033<\"\354b\273x\353\036\273\334\331\221\271n\255\2269\027\254\016\273\334\275\020\273e\340d8\212\263=;<\332\021:i\327\301\272\013\223\003:\2721\362:\202/\216\272\\3\010;{\273\322:\250\251\353\271 M$\273~\241\016\2733-\3578\r\271\222:Ba99\343\374\222:\254\373J:]o\2009J\005\337\2712\235f:\370Q\2009\353.E\272J\321N7X\221\000\272y\231\323\271=)\261\270\260\352\217\271\312\2700\272\021.\037\272;\356.:\364^\313:\241\311\005\273y\260\310\272\032\351\245\271\227\201\210:6*\017;B\317h\2727\377^9\226\322\222\272\362\272\326:\212\326#;\262\024Y:\355\206\355\272\246\323\233:Wk\340\271\374\344\n\273\253\371\237\273\306\2527:\201\261\0079\227\255f\273w*\244:\313\371\001:\232\205\000\273Y\315\357\272\216A\001<\263\032\342:\222>\211;\345\010B\273\372\016\232\2736{\004\272KY\3439\262\370\003\274\254M\234\272L\367\322;\276E\343:\303j\227;\302\\\343\273t\223D<L\214\020;\306T*\274:\021\026\274\016L):\306.G\273\273\210[\273B\203\247\273@\234\275<\000\204\227:\354@\221\272D\016P;\377.\270\272cc(;\2703\330\272W\315\007\273\356\030z\273\020\202$;$\t(:\204\373\357:\337\365\1779\227\304\014:7\233\305;4\352\233\272\354A\301\273`1\244\271X\021l:\354\264\253\272\006>Z\272\0047\211;\007h\002\273\242\017F\271\257+\240:\352\254G\273\227\224\254\272\234?\272\272\352\242\2509\017m]:\353\330\265:(\222M\2727\003\265:r#\2359\274xR\267qnt\272\343\260\351\270\034\177\242:\340r\330\2711\n\004\272\030I+\271\032\260\017;6\253\027\272Q|\010;\016\350\352:%\006\204\273^k\237\273S\355<;\220\203\030;M\311\226\272H{+\272\315\027\227\273\341+\2449\224>.;\316l\002\274\263\225\260;\346w\002<\371\206\257;Q>\315\273\277\243\220\272wW\344\272tW+\273F\225};\313\253\201\272W\333\022;\036`\213:a\3451\273\334\227\202\267\250kn\271`\332\301\272\230,c:\327\211\210\273\303r\014;t\231\357:\237\233\252:W\354\206\272\000\322\0249x\231t\272\002\336\217\272\370\013i8;A\346:ps:\272\010f+\272\313\253q\273%\217Y<V\326\377\272w6\276\273\225u[9a\322\321\272g\307\212;M\037Y\273\324\245\2649\324\256U\271\210X\020\273\246/J;\023\340m:\014\233\276:\236\243U\272\177\3070\2731\277d:\226WC\272\355B\353:\255\271d\272\222^\\:&\232\270:\231N\002\272bY\207\273\201\034\355\272\334\356\270:\330\027\226\272^\221D;\304}\217;Y\037\007\2715D\210\273|\365+;v\314\t;>H\207\273/e\000:~\031\277:\304=\334\271\357P\021\273K\"\006\273T\253\3509}\'\002;8g\270:\324?y;\353\320/\273.V\371\272\375\375\211\272z(\275\272D\002\303:\001\234\3339\224\226_\273>\031\231:vdO\273\243E\213\273\276\217\250:\213\257\005;\333\246\234;GtT\272f\274\376:\205\350\3339\304\37
6\204\272s)\026\272\016\322\360\272\023vU\272\201!5;\361CP9\370/$\272\021O\210:o\217\3509\344\020Z\272\274\0276:\312\246\270\272\360\307\020\272Q\245\232:M\222\2778]\177{:+6E\272M=R:\277\322>\271^o\001\270k\2026\266\031\236\0108\344\356\002\267\211\352:\267$i\1777\212\253\320\266\230\231\2367\261\0255\2666\204\260\266\033\261#<\004\014.;%\014h\274\234\323\210\272\254\314\270\273\267\332\010< [\305\272jn\3369\0269\341\273:\245\377;" + } + } + } +} +node { + name: "ip1/weights/read" + op: "Identity" + input: "ip1/weights" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@ip1/weights" + } + } + } +} +node { + name: "ip1/biases" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 10 + } + } + tensor_content: "w\211-\274\n\017\206\277\367g\016?\244\245\237>\262.\273?n\331\344\275\035n\371=\202\233M\276\356\356\313\276\337\370.\277" + } + } + } +} +node { + name: "ip1/biases/read" + op: "Identity" + input: "ip1/biases" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@ip1/biases" + } + } + } +} +node { + name: "ip1/ip1/MatMul" + op: "MatMul" + input: "ip1/Reshape" + input: "ip1/weights/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "ip1/ip1" + op: "BiasAdd" + input: "ip1/ip1/MatMul" + input: "ip1/biases/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "data_format" + value { + s: "NHWC" + } + } +} +node { + name: "prob" + op: "Softmax" + input: "ip1/ip1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +library { +} diff --git a/tests/TfMnist-Armnn/Validation.txt b/tests/TfMnist-Armnn/Validation.txt new file mode 100644 index 0000000000..175778ff07 --- /dev/null +++ b/tests/TfMnist-Armnn/Validation.txt @@ -0,0 +1,1000 @@ +7 +2 +1 +0 +4 +1 +4 +9 +6 +9 +0 +6 +9 +0 +1 +5 +9 +7 +3 +4 +9 +6 +6 +5 +4 +0 +7 +4 +0 +1 +3 +1 +3 +6 +7 +2 +7 +1 +2 +1 +1 +9 +9 +2 +3 +5 +3 +2 +4 +4 +6 +3 +5 +5 +6 +0 +4 +1 +9 +5 +7 +8 +9 +2 +3 +4 +3 +4 +3 +0 +7 +0 +2 +9 +1 +7 +3 +2 +9 +7 +9 +6 +2 +7 +8 +4 +7 +3 +6 +1 +3 +6 +9 +3 +1 +4 +1 +7 +6 +9 +6 +0 +5 +4 +9 +9 +2 +1 +9 +4 +8 +1 +3 +9 +7 +9 +4 +4 +9 +2 +5 +9 +7 +6 +9 +9 +0 +5 +8 +5 +6 +6 +5 +7 +8 +1 +0 +1 +6 +9 +6 +7 +3 +1 +9 +1 +8 +2 +0 +9 +9 +9 +5 +5 +1 +5 +6 +0 +3 +9 +4 +6 +5 +4 +6 +5 +4 +5 +1 +4 +4 +7 +2 +3 +2 +7 +1 +8 +1 +8 +1 +8 +5 +0 +8 +9 +2 +5 +0 +1 +1 +1 +0 +3 +0 +5 +1 +6 +4 +2 +3 +6 +1 +1 +1 +3 +9 +5 +2 +9 +4 +5 +9 +3 +9 +0 +3 +6 +5 +5 +7 +2 +2 +7 +2 +2 +8 +4 +1 +7 +3 +3 +8 +9 +7 +9 +2 +2 +4 +1 +5 +5 +8 +7 +2 +5 +0 +2 +4 +2 +4 +5 +9 +5 +7 +7 +2 +2 +2 +0 +8 +5 +7 +7 +9 +1 +8 +1 +8 +0 +3 +0 +1 +9 +9 +4 +1 +8 +2 +1 +2 +9 +2 +5 +9 +2 +6 +4 +1 +5 +4 +2 +9 +2 +0 +4 +0 +0 +2 +8 +4 +7 +1 +2 +4 +0 +2 +9 +4 +3 +3 +0 +0 +5 +1 +9 +6 +5 +2 +5 +7 +7 +9 +3 +0 +9 +2 +0 +7 +1 +1 +2 +1 +5 +3 +2 +9 +7 +8 +6 +3 +6 +1 +3 +5 +1 +0 +5 +1 +3 +1 +5 +0 +6 +2 +8 +5 +1 +9 +9 +4 +6 +7 +2 +5 +0 +6 +5 +6 +3 +7 +2 +0 +8 +8 +5 +9 +1 +1 +4 +0 +3 +3 +7 +6 +1 +6 +2 +1 +9 +2 +8 +6 +1 +9 +5 +2 +5 +4 +4 +2 +8 +3 +9 +2 +4 +5 +0 +3 +1 +7 +7 +3 +7 +9 +7 +1 +9 +2 +1 +4 +2 +9 +2 +0 +2 +9 +1 +9 +8 +1 +8 +4 +5 +9 +7 +8 +3 +7 +6 +0 +0 +3 +0 +8 +0 +6 +9 +9 +5 +3 +3 +2 +3 +9 +1 +2 +6 +8 +0 +9 +6 +6 +6 +3 +8 +8 +2 +9 +5 +8 +9 +6 +1 +8 +4 +1 +2 +8 +3 +1 +9 +7 +5 +4 +0 +8 +9 +9 +1 +0 +5 +2 +3 +7 
+2 +9 +4 +0 +6 +3 +9 +3 +2 +1 +3 +1 +5 +6 +5 +2 +8 +2 +2 +6 +2 +6 +6 +5 +4 +8 +9 +3 +1 +3 +0 +3 +8 +2 +1 +9 +6 +9 +4 +6 +4 +1 +1 +8 +2 +5 +4 +2 +3 +4 +0 +0 +2 +3 +2 +7 +1 +0 +8 +7 +4 +4 +7 +9 +6 +9 +0 +9 +8 +0 +9 +6 +0 +6 +4 +5 +9 +9 +3 +3 +9 +3 +3 +2 +7 +8 +0 +2 +2 +1 +7 +0 +6 +5 +4 +3 +2 +0 +9 +6 +3 +8 +0 +9 +9 +6 +8 +6 +8 +5 +9 +5 +6 +0 +2 +9 +0 +2 +8 +3 +1 +9 +7 +5 +1 +0 +8 +4 +6 +2 +6 +7 +9 +3 +6 +9 +8 +2 +2 +9 +2 +7 +3 +5 +9 +1 +8 +0 +2 +0 +5 +2 +1 +3 +7 +6 +7 +1 +2 +5 +8 +0 +3 +9 +9 +4 +0 +9 +1 +8 +6 +9 +7 +4 +3 +4 +9 +1 +9 +5 +1 +7 +3 +9 +7 +6 +9 +1 +3 +2 +8 +3 +3 +6 +9 +2 +4 +7 +8 +5 +1 +3 +4 +4 +3 +1 +0 +7 +7 +0 +7 +9 +9 +4 +8 +5 +5 +9 +0 +5 +2 +1 +6 +8 +4 +8 +0 +4 +0 +6 +1 +7 +3 +8 +6 +7 +2 +6 +9 +3 +1 +4 +6 +2 +5 +9 +2 +0 +6 +2 +1 +7 +3 +9 +1 +0 +5 +9 +3 +1 +1 +7 +4 +9 +9 +9 +8 +4 +0 +2 +4 +5 +1 +1 +6 +4 +7 +1 +9 +4 +2 +4 +1 +5 +5 +3 +5 +3 +1 +4 +5 +6 +8 +9 +4 +1 +9 +3 +8 +0 +3 +2 +5 +1 +2 +9 +3 +4 +4 +0 +8 +8 +3 +3 +1 +3 +3 +5 +9 +6 +3 +2 +6 +1 +3 +6 +0 +7 +2 +1 +7 +1 +4 +2 +8 +2 +1 +9 +9 +6 +1 +1 +2 +4 +3 +1 +7 +7 +4 +7 +0 +7 +3 +1 +3 +1 +0 +7 +7 +0 +3 +5 +3 +2 +9 +6 +6 +9 +2 +8 +3 +4 +2 +2 +5 +6 +0 +9 +2 +9 +2 +8 +2 +8 +8 +7 +9 +9 +3 +0 +6 +6 +3 +2 +1 +5 +2 +2 +9 +3 +0 +5 +5 +2 +8 +1 +4 +4 +6 +0 +2 +9 +1 +4 +7 +4 +7 +3 +9 +8 +8 +4 +7 +1 +2 +1 +2 +2 +3 +2 +3 +2 +3 +9 +1 +7 +4 +0 +3 +5 +5 +8 +6 +5 +0 +6 +7 +6 +6 +3 +2 +7 +9 +1 +1 +2 +4 +6 +4 +9 +5 +2 +3 +3 +4 +7 +8 +9 +1 +1 +0 +9 +1 +4 +4 +5 +4 +0 +6 +2 +3 +3 +1 +5 +1 +2 +0 +2 +8 +1 +2 +6 +7 +1 +6 +2 +3 +9 +0 +3 +2 +2 +0 +9 +9 diff --git a/tests/TfMnist-Armnn/simple_mnist_tf.prototxt b/tests/TfMnist-Armnn/simple_mnist_tf.prototxt new file mode 100644 index 0000000000..f8573ec23c --- /dev/null +++ b/tests/TfMnist-Armnn/simple_mnist_tf.prototxt @@ -0,0 +1,117 @@ +node { + name: "Placeholder" + op: "Placeholder" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "shape" + value { + shape { + } + } + } +} +node { + name: "Variable" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 784 + } + dim { + size: 10 + } + } + tensor_content: 
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\222\n\007\222\000\000\000\000\0102\346\276\000\000\000\000\0102\346>\000\000\000\000\000\000\000\000\316\232\373\216\000\000\000\000\000\000\000\000\320\330\223\222\000\000\000\0009\006|\277\000\000\000\0009\006|?\000\000\000\000\000\000\000\000]\273\211\217\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\250H\311\210\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000p0\206\206\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000
(binary file contents omitted: raw serialized tensor/weight data, not representable as text)
\241\255\277\013\241\267\300;<\251\300\232\270\355@\326\374XA\243vC\301\017A\321\300\346\215HA(\267\250?\345\327\240\277\033\343\267\301i\030WA\221\363\010\300].\236?I:BA!\304\202\300\225\2071\301\242\261\217A\265\033\255\300\026\000\207\277\226\311\334\300\313\241&\277\302\266X@\367\037\351>\202\360\341>U$\021\277\300\332B\276\271\\??\230*\303\273\352\316\361\276tT\204\277\222\2630\300\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000jii4/\243\245\233c\361\250\220\000\000\000\000\000\000\000\000\000\000\000\000F\276\'\206\035\300h\264\000\000\000\000\000\000\000\000\227\226\2263\310\271\325\232\251\375\331\217\000\000\000\000\000\000\000\000\000\000\000\000bqX\205])\226\263\000\000\000\000a\356\021\277\361\256\016>\246A\303@\024\211\215@\000\000\000\000\324\205\224\277If_\255\221A\317\300\357\317D\277\213\025\330\277\363\001Q\300\264\2506\277\007):A+\\\230A\340\225\302\300\364\307\002@\214n\017\300O1\215\3011\256\021\300\243r\376\276\325\214\213\301$\260\306@\370\0271A\234]NA\2327\017\301$lUA\231t\311\300\210e\240\301\224\204(A\336<\247\277\252\314\207@\373J`B\003\354\274@Xs\223\275\247\311q\301\315}\261\301\321>-\301\213\310a\300m\213\324@N\030\251\301 h\005?\005B\311A{W\212A\347\356\205A7\2128\301\325\202(\301\001\213\270\301G\357\270\300\323\332\032?\270z\026\301\367\350\322\277\377\2558@\316\204IAA\371\253@\3322i\301\3022*\276\361\271\016\301\211\034\022A2\344z@\340\217\t\301T;SAo\316\007A\237\2015Ah\0035\301\345\276n\301\033!\237?\223k\217\300\236\246r@\342a7\301\302\201\203@\014\274\033Ax\021\302@\207\033BAex\034AO\230\265\301Z\037 ?@:\245@\370,\310\300\366\241\026\301\030\035\244\300\212~}@X\311$\277\220z\210@\335\013\203\300<\3766\301\020]\003\301\246\3103A \332\345\277\262h\221@\342<\013@z\272.@\277\033.\301`\207\273@\016a\022A\014C*\301\343\245\204@D\222)\300\262\255\020\301`\206NA\226\334\311\277\310\007\272@\242\321\331\301\356\362\336>\271\320\267\300o\235`\301\261#\016A\336\310\021A\017?(A\tb\204@\022\321\001A[\340\000B\353\254c\301\330zO\276Bc\224\300\377[\025\300b\375\225@\265NvA}{\213\301\206\213F\276\223IT\301\234\247!\300:\361_\3009\233I\300\004\333\217\300\300\224\311\300\2359\035A\342j\262\300\357l\374@\376\334\016@t&\263@\2173\312@\351\2421?\002\003\000\300\036\320\272@\233\304\201\301\203\254\320@\307@\232Av\337\036\301\374\305\337@\243\366\213\301\331U\335@(\353\243\277\032g\203\300\\\346\370\277\305Yr\300w\330\322@\001\353\245@e\362\210\300\310[)@.\014\276\300FI\364@%t\212@\'\032\307\300\235r!A\313!\016?(\t\377\3008\n\235@\344\313\353\300\331\300\240@\010\241/\301zf\352\300\004]\211AG\242\260@\325K3@\361\036\307\300\266c;A4\300\357\300\334\n\031\301\272/\261?\331s\377\300l\227\023@\261\256\023A\027\203\277@x\245\014A\252\224\'A\004\215\017Abd\033\300x\265\\\300Y>V\301!\354\323\301S\250\320\300)#\314A\343R\033A\213\372\027A\241Qd\301\313\354\024@\201 
aA\305\024\253\301\260\3375\301\203\201\362\300\315\337\315\277\252\311\255@tP\216\300\357\004\304\277\2675\251A\'\005\021A\362)+A\332\322\000\302[\330\274@J;G\301\254\027,A77[\301\177\326VAw\205\"A\006A\361@`;\222@I{\026\301`\355\254\301\326\370\237>\353\222\002\300\365\263\000A\005\204\302\300\2126\233@C\224D\277\250\311\017\300b\022\211A\352\232Z\301\2550\300\301\2627\212\276\356+\210A\241D\276\300L\252`?\374dkA\2361\030\301\201s&\301D\340PA\'*\237\300J\301V\300!\3725\301\213[\207A\330\267\037\300\242\375\t\276\260X\261@mM!\300\034\267\367\300\251\237\003AQ`\316\277#\243<\277K\321\362\277\344\244W@\265\264\264>\314q\371>\353\335]\300<\242\030\275z\273\266>\332\331Y@\310\306F>\300j2\276\330\274\000\277\2770\035\277\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000Z\366v\224\250\013{>\226\264\351@\223\340i\2270\033)\276\000\000\000\000\344\232\244\300\360\023\250\273b\376\016\300\337&\366\277\266\272\343\300g1\230\300_\010\316A\"\373\340\276\317|\250@\000\000\000\000\313hv\301\355\334,\3004\235\250?\201\270n\301\275\323B\301\331U\320?N\261\336A\377\205\232\300{\353\200A\357\036\007\277t\350H\300\207\203\360?k\331=\301\022\001\024\301\212`\252A3D\363\277\224C\236A\021V\014\301\324\3263A\033<5\300z\007^\277\237si\3012\274a\301B~\313\276\321\246\362@\262=\346?\221\365bA\276\214\312\277\353\017\035A\037\002\342\300NA\"@\260\230\\\301\355iQ\301rt\002AO\337\264?\262R\211@%\230tA2\323\213\277E4T\301:\263O\301\252m\206A.\335\374\300d\\+\301\261\371\341@_\r\025>:\262z?\277`?\277\312\014\031@g\313\334@\354.\242\300^I\355\301\rU\207A\010\212\212?L\0179A\242B\030A:\205\203A\327\227\004A\025\264\200\300\252_\330\300\326\004\027\301\211\275\006\301\036a\307?\004\312\225\301\270\220\\A\260V\177\300\031\004u\300\242JN\300v\310\214@\017\325\221AA\264\277\301\0039\227\300\370\224\244\277\342C\223@\231j\004A\'\375\235\300\256\324\002A\377\006rA\026\304\236\277f\335K?M\3745\301\276\t\205\301\324j\004A\200\020\317\300\337\341\234@E\242\224A\250\343\002A8,\277\3011\t\320\300\226t\240?\201S\213@/;d\301\037e\371@\352n\315\276b\315\013A\n\341\344@\026\031+\301\274,SA8\345]\300\330\234\301\300{\337\360\300\210VA\3013\\\264A\307\373<\301\000A\360@F\355E\301\204\243\003\300c\322\025\301v\230\275?\224d,AP\225\035A\2267R\301\0029\326@\030j\037?zr\010A\367\035IA(\365%\301\264\255\341?/\022\275\300t(8\301\234a\373>\262\017\240@\033\206\342@s\212\364\300#E\227\300\333\244\033\300\241\343\350?M`j\301\210\017\021A\030\323\204A/z`\300xX,\301v\357/A\207y\031\300\347\373\356@\027#\204A~T0\301\037[M@\333p.\300\366\301\243\277\327\245p?\314L\216\300\033ie\300Ww\242\300n\020\255\301%\265S\301\227b)A\201\035\205A\n\3517A\203\331\204\301\320 #A/\357,\301\321EnA\370\246\303\277r\275j\301:\300\236@*v\220A\337j\340\300\363/\241@\310\006\t\301\331\351AA\266\321\310\301\344\225\332@\036\204\005AH 
\213\3018\235<\300A\346,A6\3372A\303\316\251@$\'EA/\022\032A\270\344\002\302W\023\366@6\325w\300ex\024\3017\301\301\3003#\213A\314\016\020\276\323\267:A(9\265@p\003\301=\217\270\003\302\322\201\372@\231;\270@p\320\272\300\3640\257\300\311\320\317A\306\354\225\301w\200M\301O\363\244@G\316\332\277\201`~\301\326\333\323@\346\005\266A\333}\340\300r\261\007\301)o\256A\2543\302\300\335\364+\301\220>wAK\245\222\300x\304\323\301G\337\217@\021\347\254A\276\002\036\301\231<H\300\'\246\225A\200\037M\300tj\'\301:\317\021A\n\336%\300\347\345\271\300\307s\341\300\001\315dA\244\335\005\300\340^\"\277e_\274@\326\246\310\277\374\234h\276\316\264!@\340\026z\277\362\227\225\277\223\310\220\277\336\253\035\277\000\000\000\0009\226U@\037\003\003\300\251\234\242?\300\306\300>\005\003\003@\000\000\000\000C\217\000\276\036\226U\300p\274\302\277\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\210\364\263\275i\3652@b\021)\226F\312L\273\000\000\000\000\2511\034\300\007\245\252\262?\207\207\276\201\205,\265\314\212\217\300o\355\032\301\231\373|AU\005\266\222\230Q\300@\000\000\000\000*\242\204\277\021\322\252\300\'\242\243\277X\317\\?\307\253\310\300\332\240H\301K\n\274A\227\223\267\277z\004\005?\246\"\220\242[m\316@0\252\201\3018\252\243@50w\300\003\322/@\241\276\200\301^4\033A\n\200\002\276`\230\004\300R\273\022\277\242\216\274Al\307\303\301\375\3552A\257k\376\300\306\215\013\300$\343m\301\260\207\233A\'*P@$\364m\277\313(\226\277\004\014\237A1-\223\301\231$;@i>\034\301l\217U\300\307\202\376\300R\201nAw\225,@-h\016@\224\212\237\300\241.-A\333\2204\3010\225\325@k\256\t>\230Y\375\300\236M)?z\203i\277\305\352\002?\326\263\250@\262i\000\301n\211\207@\230I\006A+\217\026\300\325n\014A\\G\223\300w\304:@\367\034\336?y\334\017A<\233VA\367\342?\301\2604\267\300\204Ag@H\005\211\301\224v\260?\355\254\220\300(\362^\277D)fA\r>\270\276\317\262\233@\314\206\214\301TP\205@\355\235\332\277t\274k>_Bz\301g{\322\277H\243\265\300\301h\216A\366\323]A\351\226\303@d2\347\301\204\r\031\300\021r\223ATI\372\277}c\236A>\346/\301\237\213QA\311\336\234@\236N\210\300\304\214\233@+\240\203\301\036\245\031\301\316k\037Ab\0015\301(\335HAZ\324\367\300\326\257\035\301\344_\350@\347\241\352\300L_\210A\0024\213\301\222\263rAd$\256@W>r\301@\360\016@1\265\326\300\262H\260A\344\300\035A\237\243\253\300a\220\241@F\252b\301X)$\300\303S<A\241$\261\301\000\256\360\300\311j\005@\302\247\322\300\204\312\302@[n\346\300\034\326BA8\332}\300\006\234\270\277\352\177\006Aa\247\002\300>\352&\277> 
\264\301\360\031)\301\n\375gA\020\313\231\277\234\325e\277M\217\362@^T\240\300\316o\313Af4\325\300dA\005?^+\220@\3432k?\372\252\'A\275-\014\301\023~4AD\361\213@\345J\313\300\223)\217\301\253yn?\257\360\r\301\272h\200@\325KqA\033\376Q\301\023g\200\300\250\305GA\210\202.>\322\362\276\3015\014rA\236\357?@;\271\265\301\362\217!AhU\014\300C\242\031\301\235f\022A\230\021\003?\242\350\316\300\374\254V\301!\345\001B\362;\n@\031\326\304\301<_\306=\345\no@I^\334\276\254\254\020@\261q\257@^Z\375\277rQ\245\300S!\tA:\216@A\363\311\220\301$\334\017\300wg\257A\256\327<@C\352*@\305\344\310>I\333!\300\234\235I\301\371\273\336\276\246\035\377@\212\t#\301ZN\363\300cd\230?\266\341\001\300q=\rA\265h\202A)\310\316\2777CQ\301I\001\352\277=j A\225\226\250\300\364\327\202\300\037\020BA \336\301\277\226\2266@]\343=A\345\364c\277\320\010\267\301\235\333\312\300\236\030bA\347\277\000\277\272qL\300\260h\366A\373\0332\300\037\000\257\300\264\313\001A &\306\276\372\313\025\3019\037\005\301\"Q\016\301\345\344d\276\344K\333?\230k\203A&\'\310\275)9\303?I\334\244\300\316\256\000\274\341)t\300Xu^\300\235\233\334\300\000\000\000\000\307\306\306>\021\021Q@\000\000\000\000\256\236M\220\213\001}\276\000\000\000\000\000\000\000\000\276\2667\277#,,\300\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\013\375\237\204\000\000\000\000\000\000\000\000\376\215\205>\000\000\000\000\000\000\000\000\000\000\000\000`\352\361\276\3327\177\275^C\214>\000\000\000\000\315\337\212\277\334\035\236\277\0208\255\300\000\000\000\000\2253\277@\000\000\000\000\016\314\024@ \305\305\255\027\021\021\277\201\205,\265*P[\300-|g\300\311\n\261\300\265h\334?\313\241\017A\261\304N\223\330\362\271@\001\205\350\300\276JU@\302\341\235\2771\246\007\301\353\350\307\300\350\247\246@\333\3059\3002q\235?\303\004\303\224\207\326\215\277\312\351Y\301f5\331A\024\243\032\301\240C\020\301B\266c\277\377\375\214A\t\226\025\301S\212 @[\265\316\224l\032*\300C^8\300\2537eA\246@.\301\rd\036\301.\302\334\300\265\365\354A\003\031-\301\357I\266@\237\031\242\250\300\211\007\277m\343\361\300\322x4A\363\025Y\301\202\250\244\300\276p\317\301\303\223>B\367\244\340\301\356%\020@\"m;\277<\013\217A\226\277|\301\314\316\253A\203\177\206\301\206.\233\300G\302\r\302\036\357%B\361\324C\301\265\\OA<\214\202\277\366\273\246A\255J\230\301\251\311aAJ\345\315\301\020\026\004\302\334\272\234\301\266\257\347A\245\002\tA\n\255:AKe#\277\332\233\036A\352\303,A\337_\021AF\005\300\301\362U\273\301\210\255\250\301\2054\206A\201\217\010\301\t\367\225A\304`\277\275m5\306A\020r\330??OrA\264Q\020\302\247\\\210\300\242\377\327\300\031\\\006B\3435\245\301e\003vA0\3170@\362\347vA\335\264 \301\n\006*Aq\342\025\302\354\232\362\300r\300n\300\366W\305A\315`\263?e\350\215A\270\310\257\300\013\252]@\030&\256\3000(GAx\360\026\302X\024%\301\377\253\326@\311;RAn-\314\300\024\267\332\277>\263\254\300\260\330#AOn\037A}\342\253A\350\035\014\302\003\263c\301\177\371oA\345\273S@%-\221\300\214\325\257AR4\222\300T%\205@\306b]A\244=\251=5\275\232\301Q\261\270\301M\002\034A\317\227CA\016zK\301>7?A\210\266]\300\275g\270@\303\021EA\363[\323@\326\230\356\3010\367P\301\347\376\354?\003\023SA\302% 
\301\262\242\213As\013\177\277\215k;\300\331\354\230A{\220\246@y\'\350\301\372\"\001\301l\324\206\301\360\235(AS\242\306\300\022\003\211AD\022\345\277y\237VA\263\300#A\375\t*A\221\221l\301\365\243\354\300\230\303\261\301Ov\242A1\301\240\301:\364\235A\342!\376\277\221\3532@\027\006\346@>\217\203A\342\232\201\301e\035\362\277#\246\206\301\035\212k@\031\327_\301\354d\212A\0300\237\277\312\253\326\300\207q\024Af\335\324A\303V3\301\344\300]\274\277\201W\301\263\304a\301M\375\207\301h\327\271A\332oS\276\030\355n\277\032\242*@2Z\370A\021\031\247\300\006\331\215\271\313\311X\301\"\345\307\300:\305\235\300\023\274\332A\303.\005\247yy\312@\351-\003\300\370\345\325\277>\266\357\277~\233\247\212\334Y\371\300H99\277m\031\242\300\367`\234A\235\0319\275W\346\202\300\311o\343\277\036\342\350?\tf\273\242\000\000\000\000\360\243\215\277\000\000\000\000R@w\300\252\250DA\235\0319\275\014<\215\277\236\336b\276\002p\276\300\000\000\000\000\000\000\000\000\232\2319@\026\363R\275;BJ\276\000\000\000\000\000\000\000\000\"\230>\256\'\003\003\277\340\350\010\300\000\000\000\000\000\000\000\000\336\335]@\000\000\000\000u5Z\220\000\000\000\000\000\000\000\000\000\000\000\000\311\234\034\277\254\2666\300\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000r\2600\275\251\350\005\215\260\307\027>\322\215\303?\330\256^\232\206\014_\206!\177\334\300\201\221~\276\\3\260@\033\205*\246k\206\006\276\031\321\341\261%*@@\004\211\334?\357\027\206\247\026\242\300\206\365\274\220\301[\220!\300;.\200A\326+\264\277\347\020\014\277\226\316\220\275\212_\202@\222\333Q\300\357`\235\300Y\220\273\206>\324h\301\235\305\255\300l\013\321A\014k\006\300\345\205\016\277\256W$\277i\255\201@\023!+\301$8\202\301\000\000\000\000\205\234\212@\263\356\203\2768*\261A\251\"\001\300cI\200\277M\005\227\277H\273\366?k\213s\301\001,\236\301!|\330\274\345\375$A\242\240\301>\340\317\324A\017\n\317\300\217\320\222\277)\200>\300\366\317\366?\002Z\302\301\362\324!\301\367{\204\276=\316\246A\266m\001\301\221\221\364A\"\245\355\300\n\275\330\300 
\023\227\277\336\3231A\251Y\340\301\246r`?E\247\305\276\370\205uA\312\341_\301s\272\363A\375\231\240\300_g\233\300J\202\344\300\205HbA\341D\360\301:\006\213A-1[\277\346\017\003A\031e\231\300\203\314OA\266\265\"\300\233)\000\301\014\021\020\300\231\243\030A\014\210\335\301U\355oA\3323\261\277\226\237\220\300%cb\300\3324\313A\276\233\336\300\242`Z\3014f\310\300\322\304\'AUX\362\301\r\031\003A\030\302\265\277V\222\250A\021Vj\300\305\356\263A\013\217\032\301\"\177\365\300\203\355]\300=\243\024A\362)\"\302N1\277@\301\222C\277V\252\250A\222\303V\301`\267\034B\366o\307\300T\226\367\276\243o\266\300f\223\010Awi\013\302\371\336\211A2\014t\275\273\006\300A\362\"\250\301\314\226\224A\345\252\203\300\025\025\005\300{\275\013\301\t#\207A(Y\007\302\206^\351@(Z\r\275\036\312wA%MS\301\360\253\262A:\243\377\300<\311\303\277\347\276\033\301\204\265\211Av\224\t\302\246\307!=h\232\315\273\247\362\254Ax\300\201\300\246\261\226A\337\024\231\300\220\037\247\300t\276\247\300\214:\'A\001\003\017\302\024\201\017\301\203\202\202\276\217\330\251Ae4\037\300\013>\370A\027+&\277QGE\300w\363\364\276/\262y@\272\346\343\301\270\340\267\275\000\377\376\276:b\212A\254\205\354\300\214\321\233AP\340\204\300\376\334\245\276i\204\276\277\206\354\257\276\200\036\204\301su\362@\234\233\233\276\263\221\225\277\035\206]\300\373p\241A\'-\226\300\2160\330\271mZ\331\277\004\342\244\277\205\206=\301\227\364\251@\302\300\300\273\312Z\324\277Y\342d\276\203\321\200ALW*\300\222\317\231\270\200SE\277\306\304D\276Fp\342\300\205\300\350?\000\000\000\000\353\272\316@\032VS\266.\004\033@\351\375\336\275\000\000\000\000>\337j\221\376A\033\260\036\214W\277\343L3=\000\000\000\000\324\335\023\276\276\202m\222A\227\206?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000q>\027\276-\t\311\262\000\000\000\000\252Q\213\274\000\000\000\000\251\250(>\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\266\274<>\000\000\000\000\266\274<\276\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\367\354\354>\000\000\000\000\370\354\354\276\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000_bb\277\000\000\000\000\000\000\000\000\371\234\036>\000\000\000\000 
\273:?\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\317\320\240\300\000\000\000\000\000\000\000\000y\016n\277\000\000\000\000\236\222\276@\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000jN\256\300\000\000\000\000\000\000\000\000m\267\210?\000\000\000\000\221 \214@\000\000\000\000\000\000\000\000\000\000\000\000A=\037\254C\233\n\300\000\000\000\000\000\000\000\0008EH\276\000\000\000\000\227\037\027@\000\000\000\000\252e\332\264\027{\367\276\331\206*\257\326x\257\300\260\366\344>\000\000\000\000\275P\212@\013P\361\277\303HE@\000\000\000\000k)\240\264h\202\004\276M\3701\266\340$L\300*\t\200?\000\000\000\0005\355\031@\036\210_\276k<\007>\000\000\000\000\262\271T\277]\366M\233\036\033\265\266>D@\300Mq\021?\000\000\000\000\334\3220;N\032\016\277\315pt@\376\374\374\275\333\177-\277\002\251\033\276b\016X\265V\340\027\300\233<\007\276\233]\310\274\203W.A~\314\347\276\233\276\336\300\007\007\007\277[l\213\251\266[\312\276\037\230\343\242\\\006\"\301\006yD\276\330<\202\275+\253SA\201\200\200\276\3158\326\277\325\323S\277:\333\244\211i\016\371\274\221\220\220\274f\007\256\300\260&\203\235\342J\240\273\013\004\023A\306\305\305\276S\360\036\300\240\237\037\277\000\000\000\000\365\025T\242\020\020\020\277(\271z\300$\031\213\257\000\000\000\000\313\026\233@\306\330\330\276SJ.?7\016i\235\342\221d\277\272\213>\242\325\324\324\276\246\343R\300\375\277}\257\000\000\000\000A\353\002A\346\322\311\276\363\317K\300\354\352j\276\202\3649\300\304M?\221\010\004\242\232\364\3069\300\3508j\263\000\000\000\000\330i3Ax\214\311\2739l\245\300\377\375\375\276\257\360\367\274\007\222_\274ks!\252w\325\223\277\013v\267\276\023\357\204\206=\351#A\277\333\353\270\226\016\003\301\203\202\202\276\000\000\000\000\305(\t\277ks!\252\274\216\343\245;\014\341\277\023\357\204\206\326\030\003@ h\315\247\'\037\000?\356\253t\277\000\000\000\000\255\242g\276\331\245\333\211\035LF\277\355\346D\277\000\000\000\000\361\312\337?\262\177\351\234\3001z?\232\252D\300\000\000\000\000\356\343g\277\000\000\000\000\021d\037\300~Z\231\263\000\000\000\000$\375\236@\244x\321\224\276\032\300?\336\2413\277\000\000\000\000Y3\275\222\000\000\000\000\333\225\021\277\'\022\214\262\000\000\000\000XA\n@\375S\277\223\240\315c\277\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000" + } + } + } +} +node { + name: "Variable_1" + op: "Const" + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + dim { + size: 10 + } + } + tensor_content: "\372\214\372\301\212\364\261A\274V\372A\2253S\301x\360!\276\313\275\256B3G \301\017k\034B\363:\320\302\277\235\251\301" + } + } + } +} +node { + name: "MatMul" + op: "MatMul" + input: "Placeholder" + input: "Variable" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } +} +node { + name: "add" + op: "Add" + input: 
"MatMul" + input: "Variable_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +node { + name: "Softmax" + op: "Softmax" + input: "add" + attr { + key: "T" + value { + type: DT_FLOAT + } + } +} +library { +} diff --git a/tests/YoloDatabase.cpp b/tests/YoloDatabase.cpp new file mode 100644 index 0000000000..4c91384073 --- /dev/null +++ b/tests/YoloDatabase.cpp @@ -0,0 +1,101 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// +#include "YoloDatabase.hpp" + +#include <armnn/Exceptions.hpp> + +#include <array> +#include <cstdint> +#include <tuple> +#include <utility> + +#include <boost/assert.hpp> +#include <boost/format.hpp> +#include <boost/log/trivial.hpp> +#include <boost/numeric/conversion/cast.hpp> + +#include "InferenceTestImage.hpp" + +namespace +{ +enum class YoloVocClass : unsigned int +{ + Aeroplane, + Bicycle, + Bird, + Boat, + Bottle, + Bus, + Car, + Cat, + Chair, + Cow, + DiningTable, + Dog, + Horse, + Motorbike, + Person, + PottedPlant, + Sheep, + Sofa, + Train, + TvMonitor +}; + +template <typename E> +constexpr auto to_underlying(E e) noexcept +{ + return static_cast<std::underlying_type_t<E>>(e); +} + +class ImageNotFoundException : public armnn::Exception +{ + using Exception::Exception; +}; + +using YoloInputOutput = std::pair<const char* const, YoloDetectedObject>; + +const std::array<YoloInputOutput,1> g_PerTestCaseInputOutput = +{ + YoloInputOutput{ + "yolo_dog_448x448.png", + { to_underlying(YoloVocClass::Dog), YoloBoundingBox{ 233.0f, 256.0f, 299.0f, 462.0f }, 0.5088733434677124f } + }, +}; + +} // namespace + +YoloDatabase::YoloDatabase(const std::string& imageDir) + : m_ImageDir(imageDir) +{ +} + +std::unique_ptr<YoloDatabase::TTestCaseData> YoloDatabase::GetTestCaseData(unsigned int testCaseId) +{ + testCaseId = testCaseId % boost::numeric_cast<unsigned int>(g_PerTestCaseInputOutput.size()); + const auto& testCaseInputOutput = g_PerTestCaseInputOutput[testCaseId]; + const std::string imagePath = m_ImageDir + testCaseInputOutput.first; + + // Load test case input image + std::vector<float> imageData; + try + { + InferenceTestImage image(imagePath.c_str()); + image.Resize(YoloImageWidth, YoloImageHeight); + imageData = GetImageDataInArmNnLayoutAsNormalizedFloats(ImageChannelLayout::Rgb, image); + } + catch (const InferenceTestImageException& e) + { + BOOST_LOG_TRIVIAL(fatal) << "Failed to load test case " << testCaseId << " with error: " << e.what(); + return nullptr; + } + + // Prepare test case output + std::vector<YoloDetectedObject> topObjectDetections; + topObjectDetections.reserve(1); + topObjectDetections.push_back(testCaseInputOutput.second); + + return std::make_unique<YoloTestCaseData>(std::move(imageData), std::move(topObjectDetections)); +}
diff --git a/tests/YoloDatabase.hpp b/tests/YoloDatabase.hpp
new file mode 100644
index 0000000000..3656e26a94
--- /dev/null
+++ b/tests/YoloDatabase.hpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "ClassifierTestCaseData.hpp"
+
+#include <array>
+#include <string>
+#include <memory>
+
+struct YoloBoundingBox
+{
+    float m_X;
+    float m_Y;
+    float m_W;
+    float m_H;
+};
+
+struct YoloDetectedObject
+{
+    YoloDetectedObject(unsigned int yoloClass,
+        const YoloBoundingBox& box,
+        float confidence)
+        : m_Class(yoloClass)
+        , m_Box(box)
+        , m_Confidence(confidence)
+    {}
+
+    unsigned int m_Class;
+    YoloBoundingBox m_Box;
+    float m_Confidence;
+};
+
+class YoloTestCaseData
+{
+public:
+    YoloTestCaseData(std::vector<float> inputImage,
+        std::vector<YoloDetectedObject> topObjectDetections)
+        : m_InputImage(std::move(inputImage))
+        , m_TopObjectDetections(std::move(topObjectDetections))
+    {
+    }
+
+    std::vector<float> m_InputImage;
+    std::vector<YoloDetectedObject> m_TopObjectDetections;
+};
+
+constexpr unsigned int YoloImageWidth = 448;
+constexpr unsigned int YoloImageHeight = 448;
+
+class YoloDatabase
+{
+public:
+    using TTestCaseData = YoloTestCaseData;
+
+    explicit YoloDatabase(const std::string& imageDir);
+    std::unique_ptr<TTestCaseData> GetTestCaseData(unsigned int testCaseId);
+
+private:
+    std::string m_ImageDir;
+};
diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp
new file mode 100644
index 0000000000..edc4808939
--- /dev/null
+++ b/tests/YoloInferenceTest.hpp
@@ -0,0 +1,237 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#pragma once
+
+#include "InferenceTest.hpp"
+#include "YoloDatabase.hpp"
+
+#include <algorithm>
+#include <array>
+#include <utility>
+
+#include <boost/assert.hpp>
+#include <boost/multi_array.hpp>
+#include <boost/test/tools/floating_point_comparison.hpp>
+
+constexpr size_t YoloOutputSize = 1470;
+
+template <typename Model>
+class YoloTestCase : public InferenceModelTestCase<Model>
+{
+public:
+    YoloTestCase(Model& model,
+        unsigned int testCaseId,
+        YoloTestCaseData& testCaseData)
+        : InferenceModelTestCase<Model>(model, testCaseId, std::move(testCaseData.m_InputImage), YoloOutputSize)
+        , m_FloatComparer(boost::math::fpc::percent_tolerance(1.0f))
+        , m_TopObjectDetections(std::move(testCaseData.m_TopObjectDetections))
+    {
+    }
+
+    virtual TestCaseResult ProcessResult(const InferenceTestOptions& options) override
+    {
+        using Boost3dArray = boost::multi_array<float, 3>;
+
+        const std::vector<float>& output = this->GetOutput();
+        BOOST_ASSERT(output.size() == YoloOutputSize);
+
+        constexpr Boost3dArray::index gridSize = 7;
+        constexpr Boost3dArray::index numClasses = 20;
+        constexpr Boost3dArray::index numScales = 2;
+
+        const float* outputPtr = output.data();
+
+        // Range 0-980. Class probabilities. 7x7x20
+        Boost3dArray classProbabilities(boost::extents[gridSize][gridSize][numClasses]);
+        for (Boost3dArray::index y = 0; y < gridSize; ++y)
+        {
+            for (Boost3dArray::index x = 0; x < gridSize; ++x)
+            {
+                for (Boost3dArray::index c = 0; c < numClasses; ++c)
+                {
+                    classProbabilities[y][x][c] = *outputPtr++;
+                }
+            }
+        }
+
+        // Range 980-1078. Scales. 7x7x2
+        Boost3dArray scales(boost::extents[gridSize][gridSize][numScales]);
+        for (Boost3dArray::index y = 0; y < gridSize; ++y)
+        {
+            for (Boost3dArray::index x = 0; x < gridSize; ++x)
+            {
+                for (Boost3dArray::index s = 0; s < numScales; ++s)
+                {
+                    scales[y][x][s] = *outputPtr++;
+                }
+            }
+        }
+
+        // Range 1078-1469. Bounding boxes. 7x7x2x4
+        constexpr float imageWidthAsFloat = static_cast<float>(YoloImageWidth);
+        constexpr float imageHeightAsFloat = static_cast<float>(YoloImageHeight);
+
+        boost::multi_array<float, 4> boxes(boost::extents[gridSize][gridSize][numScales][4]);
+        for (Boost3dArray::index y = 0; y < gridSize; ++y)
+        {
+            for (Boost3dArray::index x = 0; x < gridSize; ++x)
+            {
+                for (Boost3dArray::index s = 0; s < numScales; ++s)
+                {
+                    float bx = *outputPtr++;
+                    float by = *outputPtr++;
+                    float bw = *outputPtr++;
+                    float bh = *outputPtr++;
+
+                    boxes[y][x][s][0] = ((bx + static_cast<float>(x)) / 7.0f) * imageWidthAsFloat;
+                    boxes[y][x][s][1] = ((by + static_cast<float>(y)) / 7.0f) * imageHeightAsFloat;
+                    boxes[y][x][s][2] = bw * bw * static_cast<float>(imageWidthAsFloat);
+                    boxes[y][x][s][3] = bh * bh * static_cast<float>(imageHeightAsFloat);
+                }
+            }
+        }
+        BOOST_ASSERT(output.data() + YoloOutputSize == outputPtr);
+
+        std::vector<YoloDetectedObject> detectedObjects;
+        detectedObjects.reserve(gridSize * gridSize * numScales * numClasses);
+
+        for (Boost3dArray::index y = 0; y < gridSize; ++y)
+        {
+            for (Boost3dArray::index x = 0; x < gridSize; ++x)
+            {
+                for (Boost3dArray::index s = 0; s < numScales; ++s)
+                {
+                    for (Boost3dArray::index c = 0; c < numClasses; ++c)
+                    {
+                        // Resolved confidence: Class probabilities * scales
+                        const float confidence = classProbabilities[y][x][c] * scales[y][x][s];
+
+                        // Resolve bounding box and store
+                        YoloBoundingBox box;
+                        box.m_X = boxes[y][x][s][0];
+                        box.m_Y = boxes[y][x][s][1];
+                        box.m_W = boxes[y][x][s][2];
+                        box.m_H = boxes[y][x][s][3];
+
+                        detectedObjects.emplace_back(c, box, confidence);
+                    }
+                }
+            }
+        }
+
+        // Sort detected objects by confidence
+        std::sort(detectedObjects.begin(), detectedObjects.end(),
+            [](const YoloDetectedObject& a, const YoloDetectedObject& b)
+            {
+                // Sort by largest confidence first, then by class
+                return a.m_Confidence > b.m_Confidence
+                    || (a.m_Confidence == b.m_Confidence && a.m_Class > b.m_Class);
+            });
+
+        // Check the top N detections
+        auto outputIt  = detectedObjects.begin();
+        auto outputEnd = detectedObjects.end();
+
+        for (const YoloDetectedObject& expectedDetection : m_TopObjectDetections)
+        {
+            if (outputIt == outputEnd)
+            {
+                // Somehow expected more things to check than detections found by the model
+                return TestCaseResult::Abort;
+            }
+
+            const YoloDetectedObject& detectedObject = *outputIt;
+            if (detectedObject.m_Class != expectedDetection.m_Class)
+            {
+                BOOST_LOG_TRIVIAL(error) << "Prediction for test case " << this->GetTestCaseId() <<
+                    " (" << detectedObject.m_Class << ")" <<
                    " is incorrect (should be " << expectedDetection.m_Class << ")";
+                return TestCaseResult::Failed;
+            }
+
+            if (!m_FloatComparer(detectedObject.m_Box.m_X, expectedDetection.m_Box.m_X) ||
+                !m_FloatComparer(detectedObject.m_Box.m_Y, expectedDetection.m_Box.m_Y) ||
+                !m_FloatComparer(detectedObject.m_Box.m_W, expectedDetection.m_Box.m_W) ||
+                !m_FloatComparer(detectedObject.m_Box.m_H, expectedDetection.m_Box.m_H) ||
+                !m_FloatComparer(detectedObject.m_Confidence, expectedDetection.m_Confidence))
+            {
+                BOOST_LOG_TRIVIAL(error) << "Detected bounding box for test case " << this->GetTestCaseId() <<
+                    " is incorrect";
+                return TestCaseResult::Failed;
+            }
+
+            ++outputIt;
+        }
+
+        return TestCaseResult::Ok;
+    }
+
+private:
+    boost::math::fpc::close_at_tolerance<float> m_FloatComparer;
+    std::vector<YoloDetectedObject> m_TopObjectDetections;
+};
+
+template <typename Model>
+class YoloTestCaseProvider : public IInferenceTestCaseProvider
+{
+public:
+    template <typename TConstructModelCallable>
+    YoloTestCaseProvider(TConstructModelCallable constructModel)
+        : m_ConstructModel(constructModel)
+    {
+    }
+
+    virtual void AddCommandLineOptions(boost::program_options::options_description& options) override
+    {
+        namespace po = boost::program_options;
+
+        options.add_options()
+            ("data-dir,d", po::value<std::string>(&m_DataDir)->required(),
+                "Path to directory containing test data");
+
+        Model::AddCommandLineOptions(options, m_ModelCommandLineOptions);
+    }
+
+    virtual bool ProcessCommandLineOptions() override
+    {
+        if (!ValidateDirectory(m_DataDir))
+        {
+            return false;
+        }
+
+        m_Model = m_ConstructModel(m_ModelCommandLineOptions);
+        if (!m_Model)
+        {
+            return false;
+        }
+
+        m_Database = std::make_unique<YoloDatabase>(m_DataDir.c_str());
+        if (!m_Database)
+        {
+            return false;
+        }
+
+        return true;
+    }
+
+    virtual std::unique_ptr<IInferenceTestCase> GetTestCase(unsigned int testCaseId) override
+    {
+        std::unique_ptr<YoloTestCaseData> testCaseData = m_Database->GetTestCaseData(testCaseId);
+        if (!testCaseData)
+        {
+            return nullptr;
+        }
+
+        return std::make_unique<YoloTestCase<Model>>(*m_Model, testCaseId, *testCaseData);
+    }
+
+private:
+    typename Model::CommandLineOptions m_ModelCommandLineOptions;
+    std::function<std::unique_ptr<Model>(typename Model::CommandLineOptions)> m_ConstructModel;
+    std::unique_ptr<Model> m_Model;
+
+    std::string m_DataDir;
+    std::unique_ptr<YoloDatabase> m_Database;
+};
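The ProcessResult implementation above walks the 1470-float YOLO v1 output as three packed blocks: 7x7x20 class probabilities, then 7x7x2 per-box scales, then 7x7x2x4 box coordinates, and ranks every candidate by classProbability * scale. The same layout can be expressed as flat-index arithmetic; the sketch below is illustrative only, the index helper names are invented for this example and are not part of the commit.

```cpp
// Illustrative helpers, not part of the commit: where a given (cell, box, class)
// combination lives inside the flat 1470-float YOLO v1 output that the nested
// loops in YoloTestCase::ProcessResult decode.
#include <cstddef>

constexpr std::size_t kGrid      = 7;                                     // 7x7 grid cells
constexpr std::size_t kClasses   = 20;                                    // VOC classes
constexpr std::size_t kBoxes     = 2;                                     // boxes (scales) per cell
constexpr std::size_t kClassBase = 0;                                     // floats 0..979
constexpr std::size_t kScaleBase = kGrid * kGrid * kClasses;              // floats 980..1077
constexpr std::size_t kBoxBase   = kScaleBase + kGrid * kGrid * kBoxes;   // floats 1078..1469

constexpr std::size_t ClassProbIndex(std::size_t y, std::size_t x, std::size_t c)
{
    return kClassBase + (y * kGrid + x) * kClasses + c;
}

constexpr std::size_t ScaleIndex(std::size_t y, std::size_t x, std::size_t s)
{
    return kScaleBase + (y * kGrid + x) * kBoxes + s;
}

constexpr std::size_t BoxCoordIndex(std::size_t y, std::size_t x, std::size_t s, std::size_t coord)
{
    return kBoxBase + ((y * kGrid + x) * kBoxes + s) * 4 + coord;
}

// The resolved confidence used for ranking detections is then simply
//   output[ClassProbIndex(y, x, c)] * output[ScaleIndex(y, x, s)].
static_assert(ScaleIndex(0, 0, 0) == 980, "scales start right after the class probabilities");
static_assert(BoxCoordIndex(6, 6, 1, 3) == 1469, "the last box coordinate is the last output float");
```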