
Commit 9e59836

Fix out of date model configuration parameters (#51)
1 parent b2fca19 commit 9e59836

File tree

19 files changed: +251, -135 lines

File renamed without changes.

examples/batch-facial-action-coding-system/batch-facial-action-coding-system.ipynb

Lines changed: 6 additions & 4 deletions
@@ -38,10 +38,12 @@
 "print()\n",
 "\n",
 "with open(\"predictions.json\", \"r\") as f:\n",
-"    predictions = json.load(f)\n",
-"    for prediction in predictions:\n",
-"        for file_data in prediction[\"files\"]:\n",
-"            face_predictions = file_data[\"models\"][\"face\"]\n",
+"    full_predictions = json.load(f)\n",
+"    for source in full_predictions:\n",
+"        source_name = source[\"source\"][\"url\"]\n",
+"        predictions = source[\"results\"][\"predictions\"]\n",
+"        for prediction in predictions:\n",
+"            face_predictions = prediction[\"models\"][\"face\"][\"grouped_predictions\"]\n",
 "            for face_prediction in face_predictions:\n",
 "                for frame in face_prediction[\"predictions\"]:\n",
 "                    print_emotions(frame[\"emotions\"])"

examples/batch-text-entity-recognition/batch-text-entity-recognition.ipynb

Lines changed: 6 additions & 4 deletions
@@ -38,10 +38,12 @@
 "print()\n",
 "\n",
 "with open(\"predictions.json\", \"r\") as f:\n",
-"    predictions = json.load(f)\n",
-"    for prediction in predictions:\n",
-"        for file_data in prediction[\"files\"]:\n",
-"            ner_predictions = file_data[\"models\"][\"ner\"]\n",
+"    full_predictions = json.load(f)\n",
+"    for source in full_predictions:\n",
+"        source_name = source[\"source\"][\"url\"]\n",
+"        predictions = source[\"results\"][\"predictions\"]\n",
+"        for prediction in predictions:\n",
+"            ner_predictions = prediction[\"models\"][\"ner\"][\"grouped_predictions\"]\n",
 "            for ner_prediction in ner_predictions:\n",
 "                for entity_data in ner_prediction[\"predictions\"]:\n",
 "                    print(entity_data[\"entity\"])\n",

examples/batch-text-sentiment-analysis/batch-text-sentiment-analysis.ipynb

Lines changed: 7 additions & 5 deletions
@@ -38,13 +38,15 @@
 "print()\n",
 "\n",
 "with open(\"predictions.json\", \"r\") as f:\n",
-"    predictions = json.load(f)\n",
-"    for prediction in predictions:\n",
-"        for file_data in prediction[\"files\"]:\n",
-"            language_predictions = file_data[\"models\"][\"language\"]\n",
+"    full_predictions = json.load(f)\n",
+"    for source in full_predictions:\n",
+"        source_name = source[\"source\"][\"url\"]\n",
+"        predictions = source[\"results\"][\"predictions\"]\n",
+"        for prediction in predictions:\n",
+"            language_predictions = prediction[\"models\"][\"language\"][\"grouped_predictions\"]\n",
 "            for language_prediction in language_predictions:\n",
 "                for chunk in language_prediction[\"predictions\"]:\n",
-"                    print(chunk[\"word\"])\n",
+"                    print(chunk[\"text\"])\n",
 "                    print_emotions(chunk[\"emotions\"])\n",
 "                    print(\"~ ~ ~\")\n",
 "                    print_sentiment(chunk[\"sentiment\"])\n",

examples/batch-text-toxicity-detection/batch-text-toxicity-detection.ipynb

Lines changed: 7 additions & 5 deletions
@@ -38,13 +38,15 @@
 "print()\n",
 "\n",
 "with open(\"predictions.json\", \"r\") as f:\n",
-"    predictions = json.load(f)\n",
-"    for prediction in predictions:\n",
-"        for file_data in prediction[\"files\"]:\n",
-"            language_predictions = file_data[\"models\"][\"language\"]\n",
+"    full_predictions = json.load(f)\n",
+"    for source in full_predictions:\n",
+"        source_name = source[\"source\"][\"url\"]\n",
+"        predictions = source[\"results\"][\"predictions\"]\n",
+"        for prediction in predictions:\n",
+"            language_predictions = prediction[\"models\"][\"language\"][\"grouped_predictions\"]\n",
 "            for language_prediction in language_predictions:\n",
 "                for chunk in language_prediction[\"predictions\"]:\n",
-"                    print(chunk[\"word\"])\n",
+"                    print(chunk[\"text\"])\n",
 "                    print_emotions(chunk[\"emotions\"])\n",
 "                    print(\"~ ~ ~\")\n",
 "                    print_toxicity(chunk[\"toxicity\"])\n",

examples/batch-voice-expression/batch-voice-expression.ipynb

Lines changed: 7 additions & 5 deletions
@@ -39,19 +39,21 @@
 "print(\"Predictions ready!\")\n",
 "\n",
 "with open(\"predictions.json\", \"r\") as f:\n",
-"    predictions = json.load(f)\n",
-"    for prediction in predictions:\n",
-"        for file_data in prediction[\"files\"]:\n",
+"    full_predictions = json.load(f)\n",
+"    for source in full_predictions:\n",
+"        source_name = source[\"source\"][\"url\"]\n",
+"        predictions = source[\"results\"][\"predictions\"]\n",
+"        for prediction in predictions:\n",
 "            print()\n",
 "            print(\"Speech prosody\")\n",
-"            prosody_predictions = file_data[\"models\"][\"prosody\"]\n",
+"            prosody_predictions = prediction[\"models\"][\"prosody\"][\"grouped_predictions\"]\n",
 "            for prosody_prediction in prosody_predictions:\n",
 "                for segment in prosody_prediction[\"predictions\"][:1]:\n",
 "                    print_emotions(segment[\"emotions\"])\n",
 "\n",
 "            print()\n",
 "            print(\"Vocal burst\")\n",
-"            burst_predictions = file_data[\"models\"][\"burst\"]\n",
+"            burst_predictions = prediction[\"models\"][\"burst\"][\"grouped_predictions\"]\n",
 "            for burst_prediction in burst_predictions:\n",
 "                for segment in burst_prediction[\"predictions\"][:1]:\n",
 "                    print_emotions(segment[\"emotions\"])"

hume/_batch/batch_job_details.py

Lines changed: 53 additions & 0 deletions
@@ -1,5 +1,6 @@
 """Batch job details."""
 import json
+from datetime import datetime
 from typing import Any, Dict, List, Optional
 
 from hume._batch.batch_job_state import BatchJobState
@@ -99,3 +100,55 @@ def _get_invalid_response_message(cls, response: Any) -> str:
         message = "HumeBatchClient initialized with invalid API key."
 
         return message
+
+    def get_status(self) -> BatchJobStatus:
+        """Get the status of the job.
+
+        Returns:
+            BatchJobStatus: The status of the `BatchJob`.
+        """
+        return self.state.status
+
+    def get_run_time_ms(self) -> Optional[int]:
+        """Get the total time in milliseconds it took for the job to run if the job is in a terminal state.
+
+        Returns:
+            Optional[int]: Time in milliseconds it took for the job to run. If the job is not in a terminal
+                state then `None` is returned.
+        """
+        if self.state.started_timestamp_ms is not None and self.state.ended_timestamp_ms is not None:
+            return self.state.ended_timestamp_ms - self.state.started_timestamp_ms
+        return None
+
+    def get_created_time(self) -> Optional[datetime]:
+        """Get the time the job was created.
+
+        Returns:
+            Optional[datetime]: Datetime when the job was created. If the job has not started
+                then `None` is returned.
+        """
+        if self.state.created_timestamp_ms is None:
+            return None
+        return datetime.utcfromtimestamp(self.state.created_timestamp_ms / 1000)
+
+    def get_started_time(self) -> Optional[datetime]:
+        """Get the time the job started running.
+
+        Returns:
+            Optional[datetime]: Datetime when the job started running. If the job has not started
+                then `None` is returned.
+        """
+        if self.state.started_timestamp_ms is None:
+            return None
+        return datetime.utcfromtimestamp(self.state.started_timestamp_ms / 1000)
+
+    def get_ended_time(self) -> Optional[datetime]:
+        """Get the time the job stopped running if the job is in a terminal state.
+
+        Returns:
+            Optional[datetime]: Datetime when the job stopped running. If the job is not in a terminal
+                state then `None` is returned.
+        """
+        if self.state.ended_timestamp_ms is None:
+            return None
+        return datetime.utcfromtimestamp(self.state.ended_timestamp_ms / 1000)
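The timing helpers now live on the details object rather than on BatchJobState. A hedged usage sketch (the class name BatchJobDetails is inferred from the module name, and how a details instance is obtained is outside this diff):

from hume._batch.batch_job_details import BatchJobDetails

def summarize(details: BatchJobDetails) -> None:
    # Accessors added in this commit; they read through to the underlying BatchJobState.
    print("status:", details.get_status())
    print("run time (ms):", details.get_run_time_ms())  # None unless the job reached a terminal state
    print("created:", details.get_created_time())       # UTC datetime, or None
    print("started:", details.get_started_time())
    print("ended:", details.get_ended_time())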

hume/_batch/batch_job_state.py

Lines changed: 0 additions & 45 deletions
@@ -1,6 +1,5 @@
 """Batch job state."""
 from dataclasses import dataclass
-from datetime import datetime
 from typing import Optional
 
 from hume._batch.batch_job_status import BatchJobStatus
@@ -21,47 +20,3 @@ class BatchJobState:
     created_timestamp_ms: Optional[int]
     started_timestamp_ms: Optional[int]
     ended_timestamp_ms: Optional[int]
-
-    def get_run_time_ms(self) -> Optional[int]:
-        """Get the total time in milliseconds it took for the job to run if the job is in a terminal state.
-
-        Returns:
-            Optional[int]: Time in milliseconds it took for the job to run. If the job is not in a terminal
-                state then `None` is returned.
-        """
-        if self.started_timestamp_ms is not None and self.ended_timestamp_ms is not None:
-            return self.ended_timestamp_ms - self.started_timestamp_ms
-        return None
-
-    def get_created_time(self) -> Optional[datetime]:
-        """Get the time the job was created.
-
-        Returns:
-            Optional[datetime]: Datetime when the job was created. If the job has not started
-                then `None` is returned.
-        """
-        if self.created_timestamp_ms is None:
-            return None
-        return datetime.utcfromtimestamp(self.created_timestamp_ms / 1000)
-
-    def get_started_time(self) -> Optional[datetime]:
-        """Get the time the job started running.
-
-        Returns:
-            Optional[datetime]: Datetime when the job started running. If the job has not started
-                then `None` is returned.
-        """
-        if self.started_timestamp_ms is None:
-            return None
-        return datetime.utcfromtimestamp(self.started_timestamp_ms / 1000)
-
-    def get_ended_time(self) -> Optional[datetime]:
-        """Get the time the job stopped running if the job is in a terminal state.
-
-        Returns:
-            Optional[datetime]: Datetime when the job started running. If the job is not in a terminal
-                state then `None` is returned.
-        """
-        if self.ended_timestamp_ms is None:
-            return None
-        return datetime.utcfromtimestamp(self.ended_timestamp_ms / 1000)

hume/_batch/transcription_config.py

Lines changed: 0 additions & 6 deletions
@@ -14,12 +14,6 @@ class TranscriptionConfig(ConfigBase["TranscriptionConfig"]):
             If missing or null, it will be automatically detected. Values are `zh`, `da`, `nl`, `en`, `en-AU`,
             `en-IN`, `en-NZ`, `en-GB`, `fr`, `fr-CA`, `de`, `hi`, `hi-Latn`, `id`, `it`, `ja`, `ko`, `no`,
             `pl`, `pt`, `pt-BR`, `pt-PT`, `ru`, `es`, `es-419`, `sv`, `ta`, `tr`, or `uk`.
-            This configuration is not available for the streaming API.
-        identify_speakers (Optional[bool]): Whether to return identifiers for speakers over time.
-            If true, unique identifiers will be assigned to spoken words to differentiate different speakers.
-            If false, all speakers will be tagged with an "unknown" ID.
-            This configuration is not available for the streaming API.
     """
 
     language: Optional[str] = None
-    identify_speakers: Optional[bool] = None
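With identify_speakers gone, TranscriptionConfig carries only the optional language hint. A minimal sketch, assuming the config accepts its documented field as a keyword argument and that the module path mirrors the file location above:

from hume._batch.transcription_config import TranscriptionConfig

# Language is optional; when omitted it is automatically detected.
config = TranscriptionConfig(language="en")
print(config.language)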

hume/models/config/face_config.py

Lines changed: 10 additions & 6 deletions
@@ -13,24 +13,28 @@ class FaceConfig(ModelConfigBase["FaceConfig"]):
     Args:
         fps_pred (Optional[float]): Number of frames per second to process. Other frames will be omitted
            from the response.
-            This configuration is not available for the streaming API.
+            This configuration is only available for the batch API.
         prob_threshold (Optional[float]): Face detection probability threshold. Faces detected with a
            probability less than this threshold will be omitted from the response.
-            This configuration is not available for the streaming API.
+            This configuration is only available for the batch API.
         identify_faces (Optional[bool]): Whether to return identifiers for faces across frames.
            If true, unique identifiers will be assigned to face bounding boxes to differentiate different faces.
            If false, all faces will be tagged with an "unknown" ID.
         min_face_size (Optional[float]): Minimum bounding box side length in pixels to treat as a face.
            Faces detected with a bounding box side length in pixels less than this threshold will be
            omitted from the response.
-            This configuration is not available for the streaming API.
+            This configuration is only available for the batch API.
         save_faces (Optional[bool]): Whether to extract and save the detected faces to the artifacts
            directory included in the response.
-            This configuration is not available for the streaming API.
+            This configuration is only available for the batch API.
         descriptions (Optional[Dict[str, Any]]): Configuration for Descriptions predictions.
-            If missing or null, no Descriptions predictions will be generated.
+            Descriptions prediction can be enabled by setting "descriptions": {}.
+            Currently, Descriptions prediction cannot be further configured with any parameters.
+            If missing or null, no descriptions predictions will be generated.
         facs (Optional[Dict[str, Any]]): Configuration for FACS predictions.
-            If missing or null, no FACS predictions will be generated.
+            FACS prediction can be enabled by setting "facs": {}.
+            Currently, FACS prediction cannot be further configured with any parameters.
+            If missing or null, no facs predictions will be generated.
     """
 
     fps_pred: Optional[float] = None
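Per the revised docstring, FACS and Descriptions predictions are enabled by passing empty dicts, since neither currently accepts further parameters. A minimal sketch (the numeric values are illustrative, and keyword construction is assumed from the dataclass-style fields):

from hume.models.config.face_config import FaceConfig

config = FaceConfig(
    fps_pred=3.0,          # batch API only: process 3 frames per second
    prob_threshold=0.9,    # batch API only: drop faces detected below this probability
    identify_faces=True,   # assign identifiers to faces across frames
    facs={},               # enable FACS predictions (no further parameters yet)
    descriptions={},       # enable Descriptions predictions (no further parameters yet)
)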
