about summary refs log tree commit diff
diff options
context:
space:
mode:
author: Raul Farkas <raul.farkas@arm.com> 2022-07-11 20:09:48 +0100
committer: Diego Russo <diego.russo@arm.com> 2022-07-22 10:59:29 +0000
commit 625c280433fef3c9d1b64f58eab930ba0f89cd82 (patch)
tree d7c1c9900abd76aa85515ad930572f9739a24dbe
parent 7a09acbd1bccc9a7f81d79ed57259a0d32aa6873 (diff)
download mlia-625c280433fef3c9d1b64f58eab930ba0f89cd82.tar.gz
MLIA-507 Upgrade Vela version
Upgrade Vela version from 3.3.0 to 3.4.0.

- Adapt code to use new typing notation by replacing `numpy.array` with
  `numpy.ndarray` where necessary.

Change-Id: I035e9564d448652aa09a52d79c71ef09663ea776
-rw-r--r--  setup.cfg                                         2
-rw-r--r--  src/mlia/nn/tensorflow/optimizations/pruning.py   6
-rw-r--r--  src/mlia/nn/tensorflow/tflite_metrics.py          8
-rw-r--r--  tests/mlia/test_nn_tensorflow_tflite_metrics.py   2
-rw-r--r--  tests/mlia/utils/common.py                        2
5 files changed, 10 insertions, 10 deletions
diff --git a/setup.cfg b/setup.cfg
index b42ce9a..dbed6f7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -31,7 +31,7 @@ packages = find:
install_requires =
tensorflow~=2.7.1
tensorflow-model-optimization~=0.7.2
- ethos-u-vela~=3.3.0
+ ethos-u-vela~=3.4.0
requests
rich
sh
diff --git a/src/mlia/nn/tensorflow/optimizations/pruning.py b/src/mlia/nn/tensorflow/optimizations/pruning.py
index f629ba1..f1e2976 100644
--- a/src/mlia/nn/tensorflow/optimizations/pruning.py
+++ b/src/mlia/nn/tensorflow/optimizations/pruning.py
@@ -29,8 +29,8 @@ class PruningConfiguration(OptimizerConfiguration):
optimization_target: float
layers_to_optimize: Optional[List[str]] = None
- x_train: Optional[np.array] = None
- y_train: Optional[np.array] = None
+ x_train: Optional[np.ndarray] = None
+ y_train: Optional[np.ndarray] = None
batch_size: int = 1
num_epochs: int = 1
@@ -73,7 +73,7 @@ class Pruner(Optimizer):
"""Return string representation of the optimization config."""
return str(self.optimizer_configuration)
- def _mock_train_data(self) -> Tuple[np.array, np.array]:
+ def _mock_train_data(self) -> Tuple[np.ndarray, np.ndarray]:
# get rid of the batch_size dimension in input and output shape
input_shape = tuple(x for x in self.model.input_shape if x is not None)
output_shape = tuple(x for x in self.model.output_shape if x is not None)
diff --git a/src/mlia/nn/tensorflow/tflite_metrics.py b/src/mlia/nn/tensorflow/tflite_metrics.py
index b29fab3..9befb2f 100644
--- a/src/mlia/nn/tensorflow/tflite_metrics.py
+++ b/src/mlia/nn/tensorflow/tflite_metrics.py
@@ -31,13 +31,13 @@ DEFAULT_IGNORE_LIST = [
]
-def calculate_num_unique_weights(weights: np.array) -> int:
+def calculate_num_unique_weights(weights: np.ndarray) -> int:
"""Calculate the number of unique weights in the given weights."""
num_unique_weights = len(np.unique(weights))
return num_unique_weights
-def calculate_num_unique_weights_per_axis(weights: np.array, axis: int) -> List[int]:
+def calculate_num_unique_weights_per_axis(weights: np.ndarray, axis: int) -> List[int]:
"""Calculate unique weights per quantization axis."""
# Make quantized dimension the first dimension
weights_trans = np.swapaxes(weights, 0, axis)
@@ -57,7 +57,7 @@ class SparsityAccumulator:
self.total_non_zero_weights: int = 0
self.total_weights: int = 0
- def __call__(self, weights: np.array) -> None:
+ def __call__(self, weights: np.ndarray) -> None:
"""Update the accumulator with the given weights."""
non_zero_weights = np.count_nonzero(weights)
self.total_non_zero_weights += non_zero_weights
@@ -69,7 +69,7 @@ class SparsityAccumulator:
def calculate_sparsity(
- weights: np.array, accumulator: Optional[SparsityAccumulator] = None
+ weights: np.ndarray, accumulator: Optional[SparsityAccumulator] = None
) -> float:
"""
Calculate the sparsity for the given weights.
diff --git a/tests/mlia/test_nn_tensorflow_tflite_metrics.py b/tests/mlia/test_nn_tensorflow_tflite_metrics.py
index 805f7d1..cf7aaeb 100644
--- a/tests/mlia/test_nn_tensorflow_tflite_metrics.py
+++ b/tests/mlia/test_nn_tensorflow_tflite_metrics.py
@@ -31,7 +31,7 @@ def _dummy_keras_model() -> tf.keras.Model:
def _sparse_binary_keras_model() -> tf.keras.Model:
- def get_sparse_weights(shape: List[int]) -> np.array:
+ def get_sparse_weights(shape: List[int]) -> np.ndarray:
weights = np.zeros(shape)
with np.nditer(weights, op_flags=["writeonly"]) as weight_iterator:
for idx, value in enumerate(weight_iterator):
diff --git a/tests/mlia/utils/common.py b/tests/mlia/utils/common.py
index 4313cde..932343e 100644
--- a/tests/mlia/utils/common.py
+++ b/tests/mlia/utils/common.py
@@ -7,7 +7,7 @@ import numpy as np
import tensorflow as tf
-def get_dataset() -> Tuple[np.array, np.array]:
+def get_dataset() -> Tuple[np.ndarray, np.ndarray]:
"""Return sample dataset."""
mnist = tf.keras.datasets.mnist
(x_train, y_train), _ = mnist.load_data()