From 625c280433fef3c9d1b64f58eab930ba0f89cd82 Mon Sep 17 00:00:00 2001
From: Raul Farkas
Date: Mon, 11 Jul 2022 20:09:48 +0100
Subject: MLIA-507 Upgrade Vela version

Upgrade Vela version from 3.3.0 to 3.4.0.

- Adapt code to use new typing notation by replacing `numpy.array` with
  `numpy.ndarray` where necessary.

Change-Id: I035e9564d448652aa09a52d79c71ef09663ea776
---
 src/mlia/nn/tensorflow/optimizations/pruning.py | 6 +++---
 src/mlia/nn/tensorflow/tflite_metrics.py        | 8 ++++----
 2 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'src/mlia/nn/tensorflow')

diff --git a/src/mlia/nn/tensorflow/optimizations/pruning.py b/src/mlia/nn/tensorflow/optimizations/pruning.py
index f629ba1..f1e2976 100644
--- a/src/mlia/nn/tensorflow/optimizations/pruning.py
+++ b/src/mlia/nn/tensorflow/optimizations/pruning.py
@@ -29,8 +29,8 @@ class PruningConfiguration(OptimizerConfiguration):
 
     optimization_target: float
     layers_to_optimize: Optional[List[str]] = None
-    x_train: Optional[np.array] = None
-    y_train: Optional[np.array] = None
+    x_train: Optional[np.ndarray] = None
+    y_train: Optional[np.ndarray] = None
     batch_size: int = 1
     num_epochs: int = 1
 
@@ -73,7 +73,7 @@ class Pruner(Optimizer):
         """Return string representation of the optimization config."""
         return str(self.optimizer_configuration)
 
-    def _mock_train_data(self) -> Tuple[np.array, np.array]:
+    def _mock_train_data(self) -> Tuple[np.ndarray, np.ndarray]:
         # get rid of the batch_size dimension in input and output shape
         input_shape = tuple(x for x in self.model.input_shape if x is not None)
         output_shape = tuple(x for x in self.model.output_shape if x is not None)
diff --git a/src/mlia/nn/tensorflow/tflite_metrics.py b/src/mlia/nn/tensorflow/tflite_metrics.py
index b29fab3..9befb2f 100644
--- a/src/mlia/nn/tensorflow/tflite_metrics.py
+++ b/src/mlia/nn/tensorflow/tflite_metrics.py
@@ -31,13 +31,13 @@ DEFAULT_IGNORE_LIST = [
 ]
 
 
-def calculate_num_unique_weights(weights: np.array) -> int:
+def calculate_num_unique_weights(weights: np.ndarray) -> int:
     """Calculate the number of unique weights in the given weights."""
     num_unique_weights = len(np.unique(weights))
     return num_unique_weights
 
 
-def calculate_num_unique_weights_per_axis(weights: np.array, axis: int) -> List[int]:
+def calculate_num_unique_weights_per_axis(weights: np.ndarray, axis: int) -> List[int]:
     """Calculate unique weights per quantization axis."""
     # Make quantized dimension the first dimension
     weights_trans = np.swapaxes(weights, 0, axis)
@@ -57,7 +57,7 @@ class SparsityAccumulator:
         self.total_non_zero_weights: int = 0
         self.total_weights: int = 0
 
-    def __call__(self, weights: np.array) -> None:
+    def __call__(self, weights: np.ndarray) -> None:
         """Update the accumulator with the given weights."""
         non_zero_weights = np.count_nonzero(weights)
         self.total_non_zero_weights += non_zero_weights
@@ -69,7 +69,7 @@ class SparsityAccumulator:
 
 
 def calculate_sparsity(
-    weights: np.array, accumulator: Optional[SparsityAccumulator] = None
+    weights: np.ndarray, accumulator: Optional[SparsityAccumulator] = None
 ) -> float:
     """
     Calculate the sparsity for the given weights.
--
cgit v1.2.1
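
For reference, the annotation switch matters because `numpy.array` is a factory function rather than a class, so static type checkers reject it as a type, whereas `numpy.ndarray` names the actual array class. The sketch below is illustrative only and is not part of the patch or the MLIA sources; the `sparsity` helper in it is hypothetical.

```python
# Illustrative sketch, not code from the patch above.
from typing import Optional

import numpy as np


def sparsity(weights: np.ndarray) -> float:
    """Return the fraction of zero-valued elements (hypothetical helper)."""
    return 1.0 - np.count_nonzero(weights) / weights.size


# Annotate with the ndarray class, as the patch does; np.array would not
# type-check because it is a function, not a type.
x_train: Optional[np.ndarray] = None
x_train = np.zeros((4, 8), dtype=np.float32)
print(sparsity(x_train))  # 1.0 -- every element is zero
```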