|
5 | 5 | import tools |
6 | 6 |
|
7 | 7 |
|
def count_max_consec_detects(nest_data: pd.DataFrame, date_data: pd.DataFrame) -> int:
    """Determine the maximum number of consecutive bird detections for one nest.

    "Consecutive" means detections on adjacent *survey* dates for the
    site-year (not adjacent calendar days). The count is the number of
    consecutive-date pairs in the longest run, matching the original
    diff-based implementation: detections on N adjacent survey dates in a
    row yield N - 1, and a nest with no two detections on adjacent survey
    dates yields 0. (Returning the run length instead would make the
    default ``min_consec_detects=1`` filter in ``process_nests`` trivially
    true for every nest with a single detection.)

    Parameters
    ----------
    nest_data : pd.DataFrame
        Detections for a single nest; must have a ``Date`` column.
    date_data : pd.DataFrame
        One-row frame whose ``Date`` cell holds the list of all survey
        dates for the site-year.

    Returns
    -------
    int
        Maximum number of consecutive-survey-date detection pairs.
    """
    assert date_data.shape[0] == 1, "date_data should be a DataFrame with one row"

    # Order the site-year survey dates and index them, so adjacency is
    # measured in survey visits rather than calendar days.
    all_dates = sorted(pd.to_datetime(d) for d in date_data.loc[0, "Date"])
    pos = {d: i for i, d in enumerate(all_dates)}

    # Unique detection dates mapped to survey-date positions; dates not in
    # the survey calendar are ignored. Convert each date exactly once.
    idxs = sorted(
        pos[d]
        for d in (pd.to_datetime(x) for x in nest_data["Date"].unique())
        if d in pos
    )
    if len(idxs) < 2:
        return 0

    # Count adjacent-position pairs per run; track the longest run.
    longest = cur = 0
    for prev, nxt in zip(idxs, idxs[1:]):
        if nxt - prev == 1:
            cur += 1
            longest = max(longest, cur)
        else:
            cur = 0
    return longest
42 | 26 |
|
43 | 27 |
|
def process_nests(nest_file, year, site, savedir, min_score=0.3, min_detections=3, min_consec_detects=1):
    """Process nest detections into a one-row-per-nest table and write a shapefile.

    Parameters
    ----------
    nest_file : str
        Path to the shapefile of per-detection nest records.
    year, site : str
        Used to build the output file name.
    savedir : str
        Output directory; created if it does not exist.
    min_score : float
        Minimum detection score for a record to count toward a nest.
    min_detections : int
        Keep a nest with at least this many qualifying detections, OR
    min_consec_detects : int
        with at least this many consecutive-survey-date detections.
    """
    # Output column names and their shapefile-facing dtypes; also used to
    # build a correctly-typed empty frame when no nests qualify.
    schema_properties = {
        "nest_id": "int",
        "Site": "str",
        "Year": "str",
        "xmean": "float",
        "ymean": "float",
        "first_obs": "str",
        "last_obs": "str",
        "num_obs": "int",
        "species": "str",
        "sum_top1": "float",
        "num_top1": "int",
        "bird_match": "str",
    }

    nests_data = geopandas.read_file(nest_file)

    # Single row holding every survey date for the site-year; consumed by
    # count_max_consec_detects.
    date_data = (nests_data.groupby(["Site", "Year"])
                 .agg({"Date": lambda x: pd.Series(x).unique().tolist()})
                 .reset_index())

    rows = []
    for target_ind in nests_data["target_ind"].unique():
        nest_data = nests_data[(nests_data["target_ind"] == target_ind)
                               & (nests_data["score"] >= min_score)]
        num_consec_detects = count_max_consec_detects(nest_data, date_data)

        # Guard clause: skip nests that meet neither retention criterion.
        if len(nest_data) < min_detections and num_consec_detects < min_consec_detects:
            continue

        # Top species for the nest: the label with the largest summed score.
        summed_scores = (nest_data.groupby(["Site", "Year", "target_ind", "label"])["score"]
                         .agg(["sum", "count"])
                         .reset_index())
        top = summed_scores.loc[summed_scores["sum"].idxmax()]

        # One target_ind per loop iteration, so this groupby yields exactly
        # one row; take it with iloc[0] and index the MultiIndex columns by
        # (column, statistic) tuples instead of chained/positional lookups.
        info = (nest_data.groupby(["Site", "Year", "target_ind"])
                .agg({
                    "Date": ["min", "max", "count"],
                    "match_xmin": ["mean"],
                    "match_xmax": ["mean"],
                    "match_ymin": ["mean"],
                    "match_ymax": ["mean"],
                })
                .iloc[0])

        # Nest centroid: midpoint of the mean bounding box.
        xmean = (info[("match_xmin", "mean")] + info[("match_xmax", "mean")]) / 2
        ymean = (info[("match_ymin", "mean")] + info[("match_ymax", "mean")]) / 2

        rows.append([
            int(target_ind),
            str(top["Site"]),
            str(top["Year"]),
            float(xmean),
            float(ymean),
            str(info[("Date", "min")]),
            str(info[("Date", "max")]),
            int(info[("Date", "count")]),
            str(top["label"]),
            float(top["sum"]),
            int(top["count"]),
            ",".join(str(x) for x in nest_data["bird_id"]),
        ])

    os.makedirs(savedir, exist_ok=True)
    filename = os.path.join(savedir, f"{site}_{year}_processed_nests.shp")

    if rows:
        nests_df = pd.DataFrame(rows, columns=list(schema_properties))
        out_gdf = geopandas.GeoDataFrame(
            nests_df,
            geometry=geopandas.points_from_xy(nests_df.xmean, nests_df.ymean),
            crs=nests_data.crs,
        )
    else:
        # No qualifying nests: still write an empty but correctly-typed
        # shapefile so downstream readers see the expected schema.
        empty_data = {
            k: pd.Series(dtype="int64" if v == "int" else "float64" if v == "float" else "object")
            for k, v in schema_properties.items()
        }
        out_gdf = geopandas.GeoDataFrame(
            empty_data,
            geometry=geopandas.GeoSeries([], dtype="geometry"),
            crs=nests_data.crs,
        )

    # Let geopandas pick its default I/O engine (pyogrio when available,
    # otherwise fiona) instead of probing for pyogrio by hand.
    out_gdf.to_file(filename, driver="ESRI Shapefile")
106 | 130 |
|
107 | 131 |
|
108 | 132 | if __name__ == "__main__": |
|
0 commit comments