diff options
-rw-r--r--  ethosu/vela/graph_optimiser.py | 6 +++---
-rw-r--r--  ethosu/vela/tensor.py          | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py index 9f92e755..b9aafcac 100644 --- a/ethosu/vela/graph_optimiser.py +++ b/ethosu/vela/graph_optimiser.py @@ -156,8 +156,8 @@ def calc_upscaled_padding_and_skirt(padding_type, kernel_size, stride, input_dim ypad = needed_total_padding(int(input_dims[1]) * upscaling_factor, int(stride[1]), int(kernel_height)) xpad = needed_total_padding(int(input_dims[2]) * upscaling_factor, int(stride[2]), int(kernel_width)) - right_pad = ((xpad + 1) // upscaling_factor) - 1 - bottom_pad = ((ypad + 1) // upscaling_factor) - 1 + right_pad = max(((xpad + 1) // upscaling_factor) - 1, 0) + bottom_pad = max(((ypad + 1) // upscaling_factor) - 1, 0) left_pad = max(kernel_width - 1 - right_pad, 0) top_pad = max(kernel_height - 1 - bottom_pad, 0) @@ -845,7 +845,7 @@ def add_attrs_to_resizebilinear(op, arch): def add_bias_tensor(op, arch): - if ("Conv2d" in op.type or op.type.startswith("FullyConnected")) and not op.inputs[-1]: + if ("conv2d" in op.type.lower() or op.type.startswith("FullyConnected")) and not op.inputs[-1]: # Add bias/scale tensor filled with zeros weight_shape = op.inputs[1].shape weight_sets = weight_shape[-1] diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py index f0e7ea44..d4f6a409 100644 --- a/ethosu/vela/tensor.py +++ b/ethosu/vela/tensor.py @@ -232,7 +232,7 @@ def create_const_tensor(name, shape, dtype, values, value_dtype=None, purpose=Te const_tensor.purpose = purpose const_tensor.quantization = quantization const_tensor.values = np.array(values, dtype=value_dtype) - const_tensor.quant_values = np.frombuffer(const_tensor.values.tobytes(), dtype=np.uint8) + const_tensor.quant_values = const_tensor.values # Operator const_op = Operation("Const", name) const_op.set_output_tensor(const_tensor) |