@@ -285,7 +285,7 @@ def predict(
285285 """
286286 import torch # lgtm [py/repeated-import]
287287
288- x_ = x.copy()
288+ x_ = np.array([x_i for x_i in x] + [np.array([0.1]), np.array([0.1, 0.2])])[:-2]
289289
290290 # Put the model in the eval mode
291291 self._model.eval()
@@ -370,7 +370,7 @@ def loss_gradient(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
370370 """
371371 from warpctc_pytorch import CTCLoss
372372
373- x_ = x.copy()
373+ x_ = np.array([x_i for x_i in x] + [np.array([0.1]), np.array([0.1, 0.2])])[:-2]
374374
375375 # Put the model in the training mode
376376 self._model.train()
@@ -432,8 +432,6 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
432432 """
433433 import random
434434
435- import torch # lgtm [py/repeated-import]
436-
437435 from warpctc_pytorch import CTCLoss
438436
439437 # Put the model in the training mode
@@ -466,8 +464,10 @@ def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: in
466464 )
467465
468466 # Extract random batch
469- i_batch = x_preprocessed[ind[begin:end]].copy()
470- o_batch = y_preprocessed[ind[begin:end]].copy()
467+ i_batch = np.array(
468+     [x_i for x_i in x_preprocessed[ind[begin:end]]] + [np.array([0.1]), np.array([0.1, 0.2])]
469+ )[:-2]
470+ o_batch = y_preprocessed[ind[begin:end]]
471471
472472 # Transform data into the model input space
473473 inputs, targets, input_rates, target_sizes, batch_idx = self.transform_model_input(
0 commit comments