Skip to content

Commit 7aa902d

Browse files
Ken Lippold
authored and committed
Merging main
2 parents b24ea0c + c8ee03a commit 7aa902d

File tree

4 files changed

+24
-10
lines changed

4 files changed

+24
-10
lines changed

setup.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[metadata]
22
name = hydroserverpy
3-
version = 1.7.0b4
3+
version = 1.6.2
44
description = A Python client for managing HydroServer data
55
long_description_content_type = text/markdown
66
long_description = file: README.md

src/hydroserverpy/api/services/iam/workspace.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -87,15 +87,15 @@ def add_collaborator(
8787
)
8888

8989
def edit_collaborator_role(
90-
self, uid: Union[UUID, str], email: EmailStr, role: Union["Role", UUID, str]
90+
self, uid: Union[UUID, str], email: EmailStr, role: Union["Role", UUID, str], role_id: UUID
9191
) -> "Collaborator":
9292
"""Edit the role of a collaborator in a workspace."""
9393

9494
path = f"/{self.client.base_route}/{self.model.get_route()}/{str(uid)}/collaborators"
9595
headers = {"Content-type": "application/json"}
9696
body = {
9797
"email": email,
98-
"roleId": normalize_uuid(role)
98+
"roleId": normalize_uuid(role if role is not ... else role_id)
9999
}
100100

101101
response = self.client.request(
@@ -165,6 +165,7 @@ def update_api_key(
165165
uid: Union[UUID, str],
166166
api_key_id: Union[UUID, str],
167167
role: Union["Role", UUID, str] = ...,
168+
role_id: UUID = ...,
168169
name: str = ...,
169170
description: Optional[str] = ...,
170171
is_active: bool = ...,
@@ -175,7 +176,7 @@ def update_api_key(
175176
path = f"/{self.client.base_route}/{self.model.get_route()}/{str(uid)}/api-keys/{api_key_id}"
176177
headers = {"Content-type": "application/json"}
177178
body = {
178-
"roleId": normalize_uuid(role),
179+
"roleId": normalize_uuid(role if role is not ... else role_id),
179180
"name": name,
180181
"description": description,
181182
"isActive": is_active,

src/hydroserverpy/api/services/sta/datastream.py

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -149,10 +149,15 @@ def update(
149149
name: str = ...,
150150
description: str = ...,
151151
thing: Union["Thing", UUID, str] = ...,
152+
thing_id: UUID = ...,
152153
sensor: Union["Sensor", UUID, str] = ...,
154+
sensor_id: UUID = ...,
153155
observed_property: Union["ObservedProperty", UUID, str] = ...,
156+
observed_property_id: UUID = ...,
154157
processing_level: Union["ProcessingLevel", UUID, str] = ...,
158+
processing_level_id: UUID = ...,
155159
unit: Union["Unit", UUID, str] = ...,
160+
unit_id: UUID = ...,
156161
observation_type: str = ...,
157162
result_type: str = ...,
158163
sampled_medium: str = ...,
@@ -180,11 +185,15 @@ def update(
180185
body = {
181186
"name": name,
182187
"description": description,
183-
"thingId": normalize_uuid(thing),
184-
"sensorId": normalize_uuid(sensor),
185-
"observedPropertyId": normalize_uuid(observed_property),
186-
"processingLevelId": normalize_uuid(processing_level),
187-
"unitId": normalize_uuid(unit),
188+
"thingId": normalize_uuid(thing if thing is not ... else thing_id),
189+
"sensorId": normalize_uuid(sensor if sensor is not ... else sensor_id),
190+
"observedPropertyId": normalize_uuid(
191+
observed_property if observed_property is not ... else observed_property_id
192+
),
193+
"processingLevelId": normalize_uuid(
194+
processing_level if processing_level is not ... else processing_level_id
195+
),
196+
"unitId": normalize_uuid(unit if unit is not ... else unit_id),
188197
"observationType": observation_type,
189198
"resultType": result_type,
190199
"sampledMedium": sampled_medium,

src/hydroserverpy/etl/loaders/hydroserver_loader.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,13 @@ def load(self, data: pd.DataFrame, task: Task) -> None:
2828
begin_date = self.earliest_begin_date(task)
2929
new_data = data[data["timestamp"] > begin_date]
3030
for col in new_data.columns.difference(["timestamp"]):
31+
datastream = self.client.datastreams.get(
32+
uid=str(col)
33+
)
34+
ds_cutoff = datastream.phenomenon_end_time
3135
df = (
3236
new_data[["timestamp", col]]
37+
.loc[lambda d: d["timestamp"] > ds_cutoff if ds_cutoff else True]
3338
.rename(columns={col: "value"})
3439
.dropna(subset=["value"])
3540
)
@@ -67,7 +72,6 @@ def load(self, data: pd.DataFrame, task: Task) -> None:
6772
start,
6873
end - 1,
6974
)
70-
break
7175
raise
7276

7377
def _fetch_earliest_begin(

0 commit comments

Comments (0)