@@ -560,9 +560,9 @@ def _iterate_sequential(
560560 """Iterate over all samples sequentially"""
561561 return [
562562 optimize (
563- self .optimizer_type , input , init_kwargs , deepcopy (optimizer_run_kwargs )
563+ self .optimizer_type , inp , init_kwargs , deepcopy (optimizer_run_kwargs )
564564 )
565- for input , init_kwargs in tqdm (
565+ for inp , init_kwargs in tqdm (
566566 zip (self ._inputs (), self ._opt_init_kwargs ()), total = len (self .samples )
567567 )
568568 ]
@@ -651,19 +651,19 @@ def __post_init__(self, sample_fraction, ensemble_size, random_state, replace):
 
     def input_from_sample(self, sample: list[tuple[int, int]]):
         """Shallow-copy the input and update the data"""
-        input = copy(self.input)  # NOTE: Shallow copy!
+        inp = copy(self.input)  # NOTE: Shallow copy!
 
         # Sampling
         # NOTE: We always need samples to support `replace=True`
-        input.data = sample_data(input.data, sample)
+        inp.data = sample_data(inp.data, sample)
         weights = (
-            input.data_weights
-            if input.data_weights is not None
-            else pd.DataFrame(1.0, index=input.data.index, columns=input.data.columns)
+            inp.data_weights
+            if inp.data_weights is not None
+            else pd.DataFrame(1.0, index=inp.data.index, columns=inp.data.columns)
         )
-        input.data_weights = sample_weights(weights, sample)
+        inp.data_weights = sample_weights(weights, sample)
 
-        return input
+        return inp
 
 
 @dataclass
@@ -708,28 +708,22 @@ def __post_init__(self, ensemble_size, random_state):
     def input_from_sample(self, sample: list[tuple[int, int]]):
         """Subselect all input"""
         # Data
-        input = copy(self.input)  # NOTE: Shallow copy!
-        data = sample_data(input.data, sample)
-        input.data = data.dropna(axis="columns", how="all").dropna(
+        inp = copy(self.input)  # NOTE: Shallow copy!
+        data = sample_data(inp.data, sample)
+        inp.data = data.dropna(axis="columns", how="all").dropna(
             axis="index", how="all"
         )
-        if input.data_weights is not None:
-            input.data_weights, _ = input.data_weights.align(
-                input.data,
+        if inp.data_weights is not None:
+            inp.data_weights, _ = inp.data_weights.align(
+                inp.data,
                 axis=None,
                 join="right",
                 copy=True,
-                fill_value=input.missing_weights_value,
+                fill_value=inp.missing_weights_value,
             )
-            input.data_weights = sample_weights(input.data_weights, sample)
+            inp.data_weights = sample_weights(inp.data_weights, sample)
 
         # Select single hazard event
-        input.hazard = input.hazard.select(event_id=input.data.index)
+        inp.hazard = inp.hazard.select(event_id=inp.data.index)
 
-        # Select single region in exposure
-        # NOTE: This breaks impact_at_reg with pre-defined region IDs!!
-        # exp = input.exposure.copy(deep=False)
-        # exp.gdf = exp.gdf[exp.gdf["region_id"] == input.data.columns[0]]
-        # input.exposure = exp
-
-        return input
+        return inp