@@ -1318,8 +1318,6 @@ def results_collect_parallel(self, dck_list, read_file_function,
 
         return result_data
 
-
-
     def results_collect(self, dck_list, read_file_function, create_index=True,
                         origin=None, store_success=True, store_hours=True,
                         remove_leap_year=True, time_label_left=False,
@@ -1413,26 +1411,33 @@ def read_file_function(result_file_path):
                     # different parameters, then the .hash value has a
                     # tuple of deck name and hash number, which need to
                     # be assigned to columns
-                    df_new[['deck', 'hash']] = dck.hash
+                    hash_names = ['deck', 'hash']
                 else:
                     # Usually, the hash is a single value
-                    df_new['hash'] = dck.hash
+                    hash_names = ['hash']
+
+                df_hashes = pd.DataFrame([dck.hash]*len(df_new),
+                                         columns=hash_names)
+
+                df_rplc = (pd.DataFrame
+                           .from_dict(dck.replace_dict, orient='index')
+                           .T.reindex(df_new.index)
+                           .infer_objects(copy=False)
+                           .ffill(axis='index')
+                           )
 
                 if store_success:
                     # Store simulation success
                     df_new['success'] = dck.success
-                for key, value in dck.replace_dict.items():
-                    df_new[key] = value
+
+                df_new = pd.concat([df_new, df_hashes, df_rplc],
+                                   axis='columns')
 
                 # Add the DataFrame to the dict of result files
                 if result_file in result_data.keys():
-                    df_old = result_data[result_file]
+                    result_data[result_file].append(df_new)
                 else:
-                    df_old = pd.DataFrame()
-                # Append the old and new df, with a new index.
-                df = pd.concat([df_old, df_new], ignore_index=True)
-                # Add it to the dict
-                result_data[result_file] = df
+                    result_data[result_file] = [df_new]
 
             except Exception as ex:
                 logger.error('Error when trying to read result file "%s"'
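Two things happen in this hunk: `df_hashes` repeats `dck.hash` on every row (when the hash is a `(deck, hash)` tuple, the list of tuples splits across the two columns named in `hash_names`), and `df_rplc` broadcasts the scalar `replace_dict` parameters the same way. A minimal sketch of the `df_rplc` idiom with hypothetical stand-in data; note it assumes `df_new` has a default RangeIndex starting at 0, so the single transposed row lands on the first label and `ffill` can fill downward:

    import pandas as pd

    # Hypothetical stand-ins for dck.replace_dict and one result DataFrame
    replace_dict = {'A_coll': 40.0, 'P_kW': 10}
    df_new = pd.DataFrame({'E_th': [1.0, 2.0, 3.0]})

    df_rplc = (pd.DataFrame
               .from_dict(replace_dict, orient='index')  # keys become the index
               .T                        # one row, one column per parameter
               .reindex(df_new.index)    # row 0 matches; other rows become NaN
               .infer_objects(copy=False)
               .ffill(axis='index'))     # propagate the values to every row

    print(pd.concat([df_new, df_rplc], axis='columns'))
    #    E_th  A_coll  P_kW
    # 0   1.0    40.0  10.0
    # 1   2.0    40.0  10.0
    # 2   3.0    40.0  10.0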
@@ -1442,6 +1447,10 @@ def read_file_function(result_file_path):
             frac = i/len(dck_list)*100
             print('\rCollecting results: {:5.1f}%'.format(frac), end='\r')
 
+        # For each file, turn the list of DataFrames into one DataFrame
+        for result_file, df_list in result_data.items():
+            result_data[result_file] = pd.concat(df_list, ignore_index=True)
+
         logger.debug('Collected result files:')
         if logger.isEnabledFor(logging.DEBUG):
            for file in result_data.keys():
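The bookkeeping change above (store a list of frames per file, then one `pd.concat` per file after the loop) is the usual cure for quadratic DataFrame growth: concatenating inside the loop re-copies every previously collected row on each iteration. A sketch of the two patterns with hypothetical data:

    import pandas as pd

    def make_chunk(i):
        # stand-in for one parsed result file
        return pd.DataFrame({'x': range(10)})

    # Quadratic: each concat copies all rows collected so far
    df = make_chunk(0)
    for i in range(1, 100):
        df = pd.concat([df, make_chunk(i)], ignore_index=True)

    # Linear: collect first, concatenate once (the pattern this commit adopts)
    chunks = [make_chunk(i) for i in range(100)]
    df = pd.concat(chunks, ignore_index=True)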
@@ -1563,7 +1572,7 @@ def results_create_index(self, result_data, replace_dict={}, origin=None,
             df_list = []
             for hash_ in df.index.get_level_values('hash').unique():
                 df_hash = df.xs(hash_, drop_level=False)
-                df_hash = df_hash[-keep_steps:]
+                df_hash = df_hash[-keep_steps:].copy()
                 df_hash.reset_index(inplace=True)
                 df_hash[t_col] = df_hash[t_col] - df_hash[t_col][0]
                 df_list.append(df_hash)
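The added `.copy()` makes `df_hash` an independent frame, so the following `reset_index(inplace=True)` and the `t_col` assignment cannot emit `SettingWithCopyWarning` or write through to `df`. The difference in isolation, assuming a plain positional slice:

    import pandas as pd

    df = pd.DataFrame({'TIME': [1, 2, 3, 4], 'value': [10, 20, 30, 40]})

    tail = df[-2:]           # slice may be a view of df; writing to it can
                             # raise SettingWithCopyWarning
    tail = df[-2:].copy()    # independent frame; safe to modify
    tail['TIME'] = tail['TIME'] - tail['TIME'].iloc[0]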
@@ -1657,7 +1666,7 @@ def results_create_index(self, result_data, replace_dict={}, origin=None,
                 logger.warning(
                     key + ': Data has leap years ' + ', '.join(years))
 
-            result_data[key] = df  # df is not modified in place
+            result_data[key] = df.copy()  # df is not modified in place
 
         return result_data
 
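Likewise, storing `df.copy()` decouples the entry in `result_data` from the local working frame; without the copy both names point at the same object. A trivial illustration with hypothetical names:

    import pandas as pd

    df = pd.DataFrame({'x': [1, 2]})
    result_data = {}

    result_data['a.out'] = df         # stores a reference to the same object
    result_data['a.out'] = df.copy()  # stores an independent snapshot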