@@ -355,23 +355,23 @@ def create_lookup(emdat_data, start, end, disaster_subtype='Tropical cyclone'):
355355 'Date_start_EM_ordinal' , 'Disaster_name' ,
356356 'EM_ID' , 'ibtracsID' , 'allocation_level' ,
357357 'possible_track' , 'possible_track_all' ])
358- lookup . hit_country = data . ISO
359- lookup . Date_start_EM = data . Date_start_clean
360- lookup . Disaster_name = data . Disaster_name
361- lookup . EM_ID = data . Disaster_No
358+ lookup [ 'hit_country' ] = data [ 'ISO' ]
359+ lookup [ 'Date_start_EM' ] = data [ 'Date_start_clean' ]
360+ lookup [ 'Disaster_name' ] = data [ 'Disaster_name' ]
361+ lookup [ 'EM_ID' ] = data [ 'Disaster_No' ]
362362 lookup = lookup .reset_index (drop = True )
363363 # create ordinals
364- for i in range (0 , len (data . Date_start_clean .values )):
365- lookup . Date_start_EM_ordinal [i ] = datetime .toordinal (
366- datetime .strptime (lookup . Date_start_EM .values [i ], '%Y-%m-%d' ))
364+ for i in range (0 , len (data [ 'Date_start_clean' ] .values )):
365+ lookup [ 'Date_start_EM_ordinal' ] [i ] = datetime .toordinal (
366+ datetime .strptime (lookup [ 'Date_start_EM' ] .values [i ], '%Y-%m-%d' ))
367367 # ordinals to numeric
368- lookup . Date_start_EM_ordinal = pd .to_numeric (lookup . Date_start_EM_ordinal )
368+ lookup [ 'Date_start_EM_ordinal' ] = pd .to_numeric (lookup [ 'Date_start_EM_ordinal' ] )
369369 # select time
370370 emdat_start = datetime .toordinal (datetime .strptime (start , '%Y-%m-%d' ))
371371 emdat_end = datetime .toordinal (datetime .strptime (end , '%Y-%m-%d' ))
372372
373- lookup = lookup [lookup . Date_start_EM_ordinal .values > emdat_start ]
374- lookup = lookup [lookup . Date_start_EM_ordinal .values < emdat_end ]
373+ lookup = lookup [lookup [ 'Date_start_EM_ordinal' ] .values > emdat_start ]
374+ lookup = lookup [lookup [ 'Date_start_EM_ordinal' ] .values < emdat_end ]
375375
376376 return lookup
377377
@@ -397,15 +397,16 @@ def emdat_possible_hit(lookup, hit_countries, delta_t):
397397 # tracks: processed IBtracks with info which track hit which country
398398 # delta_t: time difference of start of EMdat and IBrtacks
399399 possible_hit_all = []
400- for i in range (0 , len (lookup . EM_ID .values )):
400+ for i in range (0 , len (lookup [ 'EM_ID' ] .values )):
401401 possible_hit = []
402402 country_tracks = hit_countries [
403- hit_countries ['hit_country' ] == lookup .hit_country .values [i ]]
404- for j in range (0 , len (country_tracks .Date_start .values )):
405- if (lookup .Date_start_EM_ordinal .values [i ] - country_tracks .Date_start .values [j ]) < \
406- delta_t and (lookup .Date_start_EM_ordinal .values [i ] -
407- country_tracks .Date_start .values [j ]) >= 0 :
408- possible_hit .append (country_tracks .ibtracsID .values [j ])
403+ hit_countries ['hit_country' ] == lookup ['hit_country' ].values [i ]]
404+ for j in range (0 , len (country_tracks ['Date_start' ].values )):
405+ if (lookup ['Date_start_EM_ordinal' ].values [i ] -
406+ country_tracks ['Date_start' ].values [j ]) < \
407+ delta_t and (lookup ['Date_start_EM_ordinal' ].values [i ] -
408+ country_tracks ['Date_start' ].values [j ]) >= 0 :
409+ possible_hit .append (country_tracks ['ibtracsID' ].values [j ])
409410 possible_hit_all .append (possible_hit )
410411
411412 return possible_hit_all
@@ -428,14 +429,14 @@ def match_em_id(lookup, poss_hit):
428429 with all possible hits per EMdat ID
429430 """
430431 possible_hit_all = []
431- for i in range (0 , len (lookup . EM_ID .values )):
432+ for i in range (0 , len (lookup [ 'EM_ID' ] .values )):
432433 possible_hit = []
433434 # lookup without line i
434435 #lookup_match = lookup.drop(i)
435436 lookup_match = lookup
436437 # Loop over check if EM dat ID is the same
437- for i_match in range (0 , len (lookup_match . EM_ID .values )):
438- if lookup . EM_ID .values [i ] == lookup_match . EM_ID .values [i_match ]:
437+ for i_match in range (0 , len (lookup_match [ 'EM_ID' ] .values )):
438+ if lookup [ 'EM_ID' ] .values [i ] == lookup_match [ 'EM_ID' ] .values [i_match ]:
439440 possible_hit .append (poss_hit [i ])
440441 possible_hit_all .append (possible_hit )
441442 return possible_hit_all
@@ -467,7 +468,7 @@ def assign_track_to_em(lookup, possible_tracks_1, possible_tracks_2, level):
467468 """
468469
469470 for i , _ in enumerate (possible_tracks_1 ):
470- if np .isnan (lookup . allocation_level .values [i ]):
471+ if np .isnan (lookup [ 'allocation_level' ] .values [i ]):
471472 number_emdat_id = len (possible_tracks_1 [i ])
472473 # print(number_emdat_id)
473474 for j in range (0 , number_emdat_id ):
@@ -479,14 +480,15 @@ def assign_track_to_em(lookup, possible_tracks_1, possible_tracks_2, level):
479480 if all (possible_tracks_1 [i ][0 ] == possible_tracks_1 [i ][k ]
480481 for k in range (0 , len (possible_tracks_1 [i ]))):
481482 # check that track ID has not been assigned to that country already
482- ctry_lookup = lookup [lookup ['hit_country' ] == lookup .hit_country .values [i ]]
483- if possible_tracks_1 [i ][0 ][0 ] not in ctry_lookup .ibtracsID .values :
484- lookup .ibtracsID .values [i ] = possible_tracks_1 [i ][0 ][0 ]
485- lookup .allocation_level .values [i ] = level
483+ ctry_lookup = lookup [lookup ['hit_country' ]
484+ == lookup ['hit_country' ].values [i ]]
485+ if possible_tracks_1 [i ][0 ][0 ] not in ctry_lookup ['ibtracsID' ].values :
486+ lookup ['ibtracsID' ].values [i ] = possible_tracks_1 [i ][0 ][0 ]
487+ lookup ['allocation_level' ].values [i ] = level
486488 elif possible_tracks_1 [i ][j ] != []:
487- lookup . possible_track .values [i ] = possible_tracks_1 [i ]
489+ lookup [ ' possible_track' ] .values [i ] = possible_tracks_1 [i ]
488490 else :
489- lookup . possible_track_all .values [i ] = possible_tracks_1 [i ]
491+ lookup [ ' possible_track_all' ] .values [i ] = possible_tracks_1 [i ]
490492 return lookup
491493
492494
@@ -507,13 +509,13 @@ def check_assigned_track(lookup, checkset):
507509 # merge checkset and lookup
508510 check = pd .merge (checkset , lookup [['hit_country' , 'EM_ID' , 'ibtracsID' ]],
509511 on = ['hit_country' , 'EM_ID' ])
510- check_size = len (check . ibtracsID .values )
511- # not assigned values
512- not_assigned = check . ibtracsID .isnull ().sum (axis = 0 )
512+ check_size = len (check [ 'ibtracsID' ] .values )
513+ # not assigned values
514+ not_assigned = check [ 'ibtracsID' ] .isnull ().sum (axis = 0 )
513515 # correct assigned values
514- correct = sum (check . ibtracsID .values == check . IBtracsID_checked .values )
516+ correct = sum (check [ 'ibtracsID' ] .values == check [ 'IBtracsID_checked' ] .values )
515517 # wrongly assigned values
516- wrong = len (check . ibtracsID .values ) - not_assigned - correct
518+ wrong = len (check [ 'ibtracsID' ] .values ) - not_assigned - correct
517519 print ('%.1f%% tracks assigned correctly, %.1f%% wrongly, %.1f%% not assigned'
518520 % (correct / check_size * 100 ,
519521 wrong / check_size * 100 ,
@@ -707,7 +709,7 @@ def emdat_countries_by_hazard(emdat_file_csv, hazard=None, year_range=None):
707709 List of names of countries impacted by the disaster (sub-)types
708710 """
709711 df_data = clean_emdat_df (emdat_file_csv , hazard = hazard , year_range = year_range )
710- countries_iso3a = list (df_data . ISO .unique ())
712+ countries_iso3a = list (df_data [ 'ISO' ] .unique ())
711713 countries_names = list ()
712714 for iso3a in countries_iso3a :
713715 try :
@@ -800,26 +802,27 @@ def emdat_impact_yearlysum(emdat_file_csv, countries=None, hazard=None, year_ran
800802 year_range = year_range , target_version = version )
801803
802804 df_data [imp_str + " scaled" ] = scale_impact2refyear (df_data [imp_str ].values ,
803- df_data .Year .values , df_data .ISO .values ,
805+ df_data ['Year' ].values ,
806+ df_data ['ISO' ].values ,
804807 reference_year = reference_year )
805808
806809 def country_df (df_data ):
807- for data_iso in df_data . ISO .unique ():
810+ for data_iso in df_data [ 'ISO' ] .unique ():
808811 country = u_coord .country_to_iso (data_iso , "alpha3" )
809812
810- df_country = df_data .loc [df_data . ISO == country ]
813+ df_country = df_data .loc [df_data [ 'ISO' ] == country ]
811814 if not df_country .size :
812815 continue
813816
814817 # Retrieve impact data for all years
815- all_years = np .arange (min (df_data . Year ), max (df_data . Year ) + 1 )
818+ all_years = np .arange (min (df_data [ 'Year' ] ), max (df_data [ 'Year' ] ) + 1 )
816819 data_out = pd .DataFrame .from_records (
817820 [
818821 (
819822 year ,
820- np .nansum (df_country [df_country . Year .isin ([year ])][imp_str ]),
823+ np .nansum (df_country [df_country [ 'Year' ] .isin ([year ])][imp_str ]),
821824 np .nansum (
822- df_country [df_country . Year .isin ([year ])][
825+ df_country [df_country [ 'Year' ] .isin ([year ])][
823826 imp_str + " scaled"
824827 ]
825828 ),
@@ -894,13 +897,13 @@ def emdat_impact_event(emdat_file_csv, countries=None, hazard=None, year_range=N
894897 df_data ['year' ] = df_data ['Year' ]
895898 df_data ['reference_year' ] = reference_year
896899 df_data ['impact' ] = df_data [imp_str ]
897- df_data ['impact_scaled' ] = scale_impact2refyear (df_data [imp_str ].values , df_data . Year .values ,
898- df_data . ISO .values ,
900+ df_data ['impact_scaled' ] = scale_impact2refyear (df_data [imp_str ].values , df_data [ 'Year' ] .values ,
901+ df_data [ 'ISO' ] .values ,
899902 reference_year = reference_year )
900903 df_data ['region_id' ] = np .nan
901- for country in df_data . ISO .unique ():
904+ for country in df_data [ 'ISO' ] .unique ():
902905 try :
903- df_data .loc [df_data . ISO == country , 'region_id' ] = \
906+ df_data .loc [df_data [ 'ISO' ] == country , 'region_id' ] = \
904907 u_coord .country_to_iso (country , "numeric" )
905908 except LookupError :
906909 LOGGER .warning ('ISO3alpha code not found in iso_country: %s' , country )
0 commit comments