|
9 | 9 | ) |
10 | 10 |
|
11 | 11 |
|
def validate_diarization_response(response_dict):
    """
    Validate the structure of a diarization transcription response.

    Checks that the response carries a non-empty ``speaker_segments`` list and
    a non-empty top-level ``words`` list, and that every segment and word entry
    has the expected diarization fields.

    Args:
        response_dict: The transcription response serialized to a plain dict
            (e.g. via ``model_dump()``).

    Raises:
        AssertionError: If any expected field is missing or malformed.
    """
    # Validate top-level speaker_segments field
    assert "speaker_segments" in response_dict
    segments = response_dict["speaker_segments"]
    assert isinstance(segments, list)
    assert len(segments) > 0

    # Validate each speaker segment structure
    for segment in segments:
        for field in ("text", "id", "speaker_id", "start", "end", "words"):
            assert field in segment

        # Validate nested words in speaker segments
        assert isinstance(segment["words"], list)
        for word in segment["words"]:
            _validate_word(word)

    # Validate top-level words field
    assert "words" in response_dict
    assert isinstance(response_dict["words"], list)
    assert len(response_dict["words"]) > 0

    # Validate each word in top-level words
    for word in response_dict["words"]:
        _validate_word(word)


def _validate_word(word):
    """Assert that a single word entry carries all diarization fields."""
    for field in ("id", "word", "start", "end", "speaker_id"):
        assert field in word
| 51 | + |
| 52 | + |
12 | 53 | class TestTogetherTranscriptions: |
13 | 54 | @pytest.fixture |
14 | 55 | def sync_together_client(self) -> Together: |
@@ -116,3 +157,96 @@ def test_language_detection_hindi(self, sync_together_client): |
116 | 157 | assert len(response.text) > 0 |
117 | 158 | assert hasattr(response, "language") |
118 | 159 | assert response.language == "hi" |
| 160 | + |
| 161 | + def test_diarization_default(self, sync_together_client): |
| 162 | + """ |
| 163 | + Test diarization with default model in verbose JSON format |
| 164 | + """ |
| 165 | + audio_url = "https://together-public-test-data.s3.us-west-2.amazonaws.com/audio/2-speaker-conversation.wav" |
| 166 | + |
| 167 | + response = sync_together_client.audio.transcriptions.create( |
| 168 | + file=audio_url, |
| 169 | + model="openai/whisper-large-v3", |
| 170 | + response_format="verbose_json", |
| 171 | + diarize=True, |
| 172 | + ) |
| 173 | + |
| 174 | + assert isinstance(response, AudioTranscriptionVerboseResponse) |
| 175 | + assert isinstance(response.text, str) |
| 176 | + assert len(response.text) > 0 |
| 177 | + |
| 178 | + # Validate diarization fields |
| 179 | + response_dict = response.model_dump() |
| 180 | + validate_diarization_response(response_dict) |
| 181 | + |
| 182 | + def test_diarization_nvidia(self, sync_together_client): |
| 183 | + """ |
| 184 | + Test diarization with nvidia model in verbose JSON format |
| 185 | + """ |
| 186 | + audio_url = "https://together-public-test-data.s3.us-west-2.amazonaws.com/audio/2-speaker-conversation.wav" |
| 187 | + |
| 188 | + response = sync_together_client.audio.transcriptions.create( |
| 189 | + file=audio_url, |
| 190 | + model="openai/whisper-large-v3", |
| 191 | + response_format="verbose_json", |
| 192 | + diarize=True, |
| 193 | + diarization_model="nvidia", |
| 194 | + ) |
| 195 | + |
| 196 | + assert isinstance(response, AudioTranscriptionVerboseResponse) |
| 197 | + assert isinstance(response.text, str) |
| 198 | + assert len(response.text) > 0 |
| 199 | + |
| 200 | + # Validate diarization fields |
| 201 | + response_dict = response.model_dump() |
| 202 | + validate_diarization_response(response_dict) |
| 203 | + |
| 204 | + def test_diarization_pyannote(self, sync_together_client): |
| 205 | + """ |
| 206 | + Test diarization with pyannote model in verbose JSON format |
| 207 | + """ |
| 208 | + audio_url = "https://together-public-test-data.s3.us-west-2.amazonaws.com/audio/2-speaker-conversation.wav" |
| 209 | + |
| 210 | + response = sync_together_client.audio.transcriptions.create( |
| 211 | + file=audio_url, |
| 212 | + model="openai/whisper-large-v3", |
| 213 | + response_format="verbose_json", |
| 214 | + diarize=True, |
| 215 | + diarization_model="pyannote", |
| 216 | + ) |
| 217 | + |
| 218 | + assert isinstance(response, AudioTranscriptionVerboseResponse) |
| 219 | + assert isinstance(response.text, str) |
| 220 | + assert len(response.text) > 0 |
| 221 | + |
| 222 | + # Validate diarization fields |
| 223 | + response_dict = response.model_dump() |
| 224 | + validate_diarization_response(response_dict) |
| 225 | + |
| 226 | + def test_no_diarization(self, sync_together_client): |
| 227 | + """ |
| 228 | + Test with diarize=false should not have speaker segments |
| 229 | + """ |
| 230 | + audio_url = "https://together-public-test-data.s3.us-west-2.amazonaws.com/audio/2-speaker-conversation.wav" |
| 231 | + |
| 232 | + response = sync_together_client.audio.transcriptions.create( |
| 233 | + file=audio_url, |
| 234 | + model="openai/whisper-large-v3", |
| 235 | + response_format="verbose_json", |
| 236 | + diarize=False, |
| 237 | + ) |
| 238 | + |
| 239 | + assert isinstance(response, AudioTranscriptionVerboseResponse) |
| 240 | + assert isinstance(response.text, str) |
| 241 | + assert len(response.text) > 0 |
| 242 | + |
| 243 | + # Verify no diarization fields |
| 244 | + response_dict = response.model_dump() |
| 245 | + assert response_dict.get("speaker_segments") is None |
| 246 | + assert response_dict.get("words") is None |
| 247 | + |
| 248 | + # Should still have standard fields |
| 249 | + assert "text" in response_dict |
| 250 | + assert "language" in response_dict |
| 251 | + assert "duration" in response_dict |
| 252 | + assert "segments" in response_dict |
0 commit comments