-
Notifications
You must be signed in to change notification settings - Fork 0
[PYG-440] 8️⃣Python-SDK v8 #632
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
63c9a70
83ad9cd
bf17aab
284f300
dfeb361
d917298
87188de
18d02e9
650ea27
c03ffc9
e57f223
812f2c5
aab43e4
de26e2f
330c995
d1f477a
419d672
351119b
a841ec8
a4e05b6
f3abbfb
b37e759
295ca36
59cf4cd
02c4e80
71aa87a
daa09cd
0d75739
770edf0
e5960ea
80953b8
2786ceb
78dbd9b
fb0b087
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,11 +1,14 @@ | ||
| from __future__ import annotations | ||
|
|
||
| import datetime | ||
| from collections.abc import Callable | ||
| from typing import ( | ||
| Annotated, | ||
| Optional, | ||
| Any, | ||
| no_type_check, | ||
| TypeVar, | ||
| Set, | ||
| ) | ||
|
|
||
| from cognite.client import data_modeling as dm | ||
|
|
@@ -24,6 +27,23 @@ from pydantic import BaseModel, BeforeValidator, model_validator, field_validato | |
| from pydantic.alias_generators import to_camel | ||
| from pydantic.functional_serializers import PlainSerializer | ||
|
|
||
| T_CogniteResource = TypeVar("T_CogniteResource", bound=CogniteTimeSeries | CogniteSequence | CogniteFileMetadata) | ||
|
|
||
| _MISSING_VALUE = -999 | ||
|
|
||
| def _create_load_method(resource_cls: type[T_CogniteResource], required_fields: Set[str]) -> Callable[[Any], Any]: | ||
| def _load_if_dict(value: Any) -> Any: | ||
| if not isinstance(value, dict): | ||
| return value | ||
| if missing_values := set(required_fields) - set(value.keys()): | ||
| raise ValueError(f"Missing required fields: {', '.join(missing_values)}") | ||
| for key in ["createdTime", "lastUpdatedTime"]: | ||
| # GraphQL does not support returning these properties, while the read classes requires them. | ||
| value[key] = _MISSING_VALUE | ||
| return resource_cls.load(value) | ||
|
|
||
| return _load_if_dict | ||
|
|
||
|
|
||
| TimeSeries = Annotated[ | ||
| CogniteTimeSeries, | ||
|
|
@@ -32,7 +52,7 @@ TimeSeries = Annotated[ | |
| return_type=dict, | ||
| when_used="unless-none", | ||
| ), | ||
| BeforeValidator(lambda v: CogniteTimeSeries.load(v) if isinstance(v, dict) else v), | ||
| BeforeValidator(_create_load_method(CogniteTimeSeries, {"id", "isStep", "isString"})), | ||
| ] | ||
|
|
||
|
|
||
|
|
@@ -43,7 +63,7 @@ SequenceRead = Annotated[ | |
| return_type=dict, | ||
| when_used="unless-none", | ||
| ), | ||
| BeforeValidator(lambda v: CogniteSequence.load(v) if isinstance(v, dict) else v), | ||
| BeforeValidator(_create_load_method(CogniteSequence, {"id", "columns"})), | ||
| ] | ||
|
|
||
|
|
||
|
|
@@ -54,7 +74,7 @@ FileMetadata = Annotated[ | |
| return_type=dict, | ||
| when_used="unless-none", | ||
| ), | ||
| BeforeValidator(lambda v: CogniteFileMetadata.load(v) if isinstance(v, dict) else v), | ||
| BeforeValidator(_create_load_method(CogniteFileMetadata, {"id", "uploaded", "name"})), | ||
| ] | ||
|
|
||
|
|
||
|
|
@@ -123,6 +143,15 @@ class TimeSeriesGraphQL(GraphQLExternal): | |
| datetime.datetime.fromisoformat(item["timestamp"].replace("Z", "+00:00")) | ||
| ) | ||
| data["datapoints"] = datapoints["items"] | ||
| if missing := [name for name in ["id", "isString", "isStep"] if data.get(name) is None]: | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The PySDK does not give nice error messages on missing values. Just a KeyError on the first failure. |
||
| raise ValueError( | ||
| f"Cannot create datapoints, missing required fields: {', '.join(missing)}. " | ||
| "You need to include these in your query." | ||
| ) | ||
| if "type" not in data: | ||
| # Type is not supported in the timeseries you retrieve through GraphQL, but it is required | ||
| # for the Datapoints object. Luckily it can be inferred from the isString field, so we set it here. | ||
| data["type"] = "string" if data["isString"] else "numeric" | ||
| data["data"] = Datapoints.load(data) | ||
| if isinstance(data, dict) and "getLatestDataPoint" in data: | ||
| latest = data.pop("getLatestDataPoint") | ||
|
|
@@ -146,6 +175,7 @@ class TimeSeriesGraphQL(GraphQLExternal): | |
| description=self.description, | ||
| ) | ||
|
|
||
| @no_type_check | ||
| def as_read(self) -> CogniteTimeSeries: | ||
|
Comment on lines
+178
to
179
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. We do not do a type check here as this is handled by the pydantic validation. If any error is raised, then this is expected behavior. See docs: https://cognite-pygen.readthedocs-hosted.com/en/latest/usage/querying.html#data-classes |
||
| return CogniteTimeSeries( | ||
| id=self.id, | ||
|
|
@@ -160,6 +190,8 @@ class TimeSeriesGraphQL(GraphQLExternal): | |
| is_step=self.is_step, | ||
| description=self.description, | ||
| security_categories=self.security_categories, | ||
| created_time=self.created_time, | ||
| last_updated_time=self.last_updated_time, | ||
| ) | ||
|
|
||
|
|
||
|
|
@@ -237,7 +269,7 @@ class SequenceColumnGraphQL(GraphQLExternal): | |
| @field_validator("value_type", mode="before") | ||
| def title_value_type(cls, value: Any) -> Any: | ||
| if isinstance(value, str): | ||
| return value.title() | ||
| return value.upper() | ||
| return value | ||
|
|
||
| @no_type_check | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -150,7 +150,6 @@ class DataPointsAPI: | |
| uniform_index=uniform_index, | ||
| include_aggregate_name=include_aggregate_name, | ||
| include_granularity_name=include_granularity_name, | ||
| column_names="instance_id", | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This no longer exists, but is the prioritized field when doing retrieves. |
||
| ) | ||
|
|
||
| def __getattr__(self, item: str) -> Any: | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -107,7 +107,7 @@ def __init__( | |
|
|
||
| @property | ||
| def _views(self) -> dm.ViewList: | ||
| return dm.ViewList(self._view_by_id.values()) | ||
| return dm.ViewList(list(self._view_by_id.values())) | ||
|
|
||
| def __str__(self): | ||
| args = [ | ||
|
|
@@ -390,6 +390,7 @@ def _only_null_values(count: int) -> list[None]: | |
| external.timeseries.extend( | ||
| [ | ||
| TimeSeries( | ||
| id=-1, | ||
| external_id=ts, | ||
| name=ts, | ||
| data_set_id=self._data_set_id, | ||
|
|
@@ -398,6 +399,8 @@ def _only_null_values(count: int) -> list[None]: | |
| metadata={ | ||
| "source": f"Pygen{type(self).__name__}", | ||
| }, | ||
| created_time=0, | ||
| last_updated_time=1, | ||
| ) | ||
| for timeseries_set in values | ||
| for ts in ( | ||
|
|
@@ -412,6 +415,7 @@ def _only_null_values(count: int) -> list[None]: | |
| external.file.extend( | ||
| [ | ||
| FileMetadata( | ||
| id=-1, | ||
| external_id=file, | ||
| name=file, | ||
| source=self._instance_space, | ||
|
|
@@ -420,6 +424,9 @@ def _only_null_values(count: int) -> list[None]: | |
| metadata={ | ||
| "source": f"Pygen{type(self).__name__}", | ||
| }, | ||
| created_time=0, | ||
| last_updated_time=1, | ||
| uploaded=False, | ||
| ) | ||
| for file_set in values | ||
| for file in (cast(list[str], file_set) if isinstance(file_set, list) else [cast(str, file_set)]) | ||
|
|
@@ -430,13 +437,14 @@ def _only_null_values(count: int) -> list[None]: | |
| external.sequence.extend( | ||
| [ | ||
| Sequence( | ||
| id=-1, | ||
| external_id=seq, | ||
| name=seq, | ||
| data_set_id=self._data_set_id, | ||
| columns=[ | ||
| SequenceColumn( | ||
| external_id="value", | ||
| value_type=cast(Literal["Double"], "DOUBLE"), | ||
| value_type="DOUBLE", | ||
|
Comment on lines
-439
to
+447
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Bug fixed in the SDK :) |
||
| metadata={ | ||
| "source": f"Pygen{type(self).__name__}", | ||
| }, | ||
|
|
@@ -445,6 +453,8 @@ def _only_null_values(count: int) -> list[None]: | |
| metadata={ | ||
| "source": f"Pygen{type(self).__name__}", | ||
| }, | ||
| created_time=0, | ||
| last_updated_time=1, | ||
| ) | ||
| for seq_set in values | ||
| for seq in (cast(list[str], seq_set) if isinstance(seq_set, list) else [cast(str, seq_set)]) | ||
|
|
@@ -830,7 +840,7 @@ def deploy( | |
| if nodes or edges: | ||
| # There is an 'edge' if there is an outward and inward edge on two views, we can get duplicated edges. | ||
| # We should remove the duplicates. | ||
| edges = dm.EdgeApplyList({edge.as_id(): edge for edge in edges}.values()) | ||
| edges = dm.EdgeApplyList(list({edge.as_id(): edge for edge in edges}.values())) | ||
|
|
||
| created = client.data_modeling.instances.apply( | ||
| nodes, | ||
|
|
||
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Improved error message; there can be KeyErrors inside the model_validate call as well.