@@ -24,43 +24,43 @@ from training.model_builder import (
 from warnings import filterwarnings
 filterwarnings('ignore')
 
-async def main(algorithm: str, sequence_length: int, epochs: int, batch_size: int):
+def main(algorithm: str, sequence_length: int, epochs: int, batch_size: int):
     datasets_path = './datasets'
     models_path = './models'
     posttrained = './posttrained'
     pickle_file = './pickles'
 
 
-    for dataset in await get_datasets(datasets_path):
+    for dataset in get_datasets(datasets_path):
         print(f"[TRAINING] {dataset.replace('.csv', '')}")
 
         dataframe = pd.read_csv(os.path.join(datasets_path, dataset), index_col='Date')[['Close']]
         model_file = os.path.join(models_path, f"{dataset.replace('.csv', '')}.keras")
 
         # dataframe = preprocess_data(dataframe)
         dataframe.dropna(inplace=True)
-        standard_scaler, dataframe = await scale_data(dataframe, StandardScaler)
-        minmax_scaler, dataframe = await scale_data(dataframe, MinMaxScaler)
+        standard_scaler, dataframe = scale_data(dataframe, StandardScaler)
+        minmax_scaler, dataframe = scale_data(dataframe, MinMaxScaler)
 
-        sequences, labels = await create_sequences(dataframe, sequence_length)
+        sequences, labels = create_sequences(dataframe, sequence_length)
         input_shape = (sequences.shape[1], sequences.shape[2])
 
         if algorithm == "GRU":
-            model = await gru_model(input_shape)
+            model = gru_model(input_shape)
 
         elif algorithm == "LSTM":
-            model = await lstm_model(input_shape)
+            model = lstm_model(input_shape)
 
         elif algorithm == "LSTM_GRU":
-            model = await lstm_gru_model(input_shape)
+            model = lstm_gru_model(input_shape)
 
-        else: model = await lstm_model(input_shape)
+        else: model = lstm_model(input_shape)
 
         train_size = int(len(sequences) * 0.8)
         X_train, X_test = sequences[:train_size], sequences[train_size:]
         y_train, y_test = labels[:train_size], labels[train_size:]
 
-        await train({
+        train({
             'model': model,
             'model_file': model_file,
             'sequence_length': sequence_length,
@@ -70,7 +70,7 @@ async def main(algorithm: str, sequence_length: int, epochs: int, batch_size: int):
 
         dataframe_json = {'Date': dataframe.index.tolist(), 'Close': dataframe['Close'].tolist()}
 
-        await save_json(
+        save_json(
            os.path.join(posttrained, f'{dataset.replace(".csv", "")}-posttrained.json'),
            dataframe_json
        )
0 commit comments