@@ -160,9 +160,9 @@ def process_ssp_sample(ssppath):
160160 ssp_df = pd .read_csv (ssppath , skiprows = 11 )
161161 ssp_df = ssp_df [ssp_df .year >= 2010 ]
162162 ssp_df ["loginc" ] = np .log (ssp_df .value )
163- ssp_df ["isoyear" ] = ssp_df .apply (lambda row : "%s:%d" % ( row .iso , row .year ) , axis = 1 )
163+ ssp_df ["isoyear" ] = ssp_df .apply (lambda row : f"{row.iso}:{row.year:d}" , axis = 1 )
164164 ssp_df ["yearscen" ] = ssp_df .apply (
165- lambda row : "%d:%s/%s" % ( row .year , row .model , row .scenario ) , axis = 1
165+ lambda row : f"{row.year:d}:{row.model}/{row.scenario}" , axis = 1
166166 )
167167
168168 return ssp_df
@@ -176,7 +176,7 @@ def process_rff_sample(i, rffpath, ssp_df, outdir, HEADER, **storage_options):
176176 increments for a single RFF-SP
177177 """
178178
179- read_feather = os .path .join (rffpath , "run_%d .feather" % i )
179+ read_feather = os .path .join (rffpath , f"run_{i:d}.feather" )
180180 rff_raw = pd .read_feather (read_feather )
181181 rff_raw .rename (columns = {"Year" : "year" , "Country" : "iso" }, inplace = True )
182182
@@ -199,7 +199,7 @@ def process_rff_sample(i, rffpath, ssp_df, outdir, HEADER, **storage_options):
199199 rff_df = pd .concat ((rff_df , all_year_df ))
200200
201201 rff_df ["loginc" ] = np .log (rff_df .value )
202- rff_df ["isoyear" ] = rff_df .apply (lambda row : "%s:%d" % ( row .iso , row .year ) , axis = 1 )
202+ rff_df ["isoyear" ] = rff_df .apply (lambda row : f"{row.iso}:{row.year:d}" , axis = 1 )
203203
204204 rff_df = pd .merge (rff_df , rff_raw , on = ["year" , "iso" ], how = "left" )
205205
0 commit comments