232 | 232 | " sentiment = utt.meta.get(\"gpt_sentiment\", \"Not processed\")\n", |
233 | 233 | " print(f\" {utt.speaker.id}: '{utt.text[:50]}...' -> Sentiment: {sentiment}\")\n" |
234 | 234 | ] |
| 235 | + }, |
| 236 | + { |
| 237 | + "cell_type": "markdown", |
| 238 | + "id": "0d9408e2", |
| 239 | + "metadata": {}, |
| 240 | + "source": [ |
| 241 | + "## Testing Prompts on Single Objects\n", |
| 242 | + "\n", |
| 243 | + "The `transform_single` method allows you to test your prompts on individual objects without processing an entire corpus. This function allows user to test prompt development.\n" |
| 244 | + ] |
| 245 | + }, |
| 246 | + { |
| 247 | + "cell_type": "code", |
| 248 | + "execution_count": null, |
| 249 | + "id": "f11439f8", |
| 250 | + "metadata": {}, |
| 251 | + "outputs": [], |
| 252 | + "source": [ |
| 253 | + "# Test with a single utterance using string input\n", |
| 254 | + "test_text = \"I absolutely love this new feature! It's amazing!\"\n", |
| 255 | + "result = sentiment_transformer.transform_single(test_text)\n", |
| 256 | + "print(f\"Input: {test_text}\")\n", |
| 257 | + "print(f\"Sentiment: {result.meta.get('gpt_sentiment', 'Not processed')}\")\n", |
| 258 | + "print(f\"Result type: {type(result)}\")\n", |
| 259 | + "print()\n" |
| 260 | + ] |
| 261 | + }, |
| 262 | + { |
| 263 | + "cell_type": "code", |
| 264 | + "execution_count": null, |
| 265 | + "id": "82858c34", |
| 266 | + "metadata": {}, |
| 267 | + "outputs": [], |
| 268 | + "source": [ |
| 269 | + "# Test with an actual utterance object\n", |
| 270 | + "from convokit import Utterance, Speaker\n", |
| 271 | + "\n", |
| 272 | + "test_utterance = Utterance(\n", |
| 273 | + " id=\"test_utt\",\n", |
| 274 | + " text=\"This is terrible! I hate it!\",\n", |
| 275 | + " speaker=Speaker(id=\"test_speaker\")\n", |
| 276 | + ")\n", |
| 277 | + "\n", |
| 278 | + "result = sentiment_transformer.transform_single(test_utterance)\n", |
| 279 | + "print(f\"Input utterance: {test_utterance.text}\")\n", |
| 280 | + "print(f\"Speaker: {test_utterance.speaker.id}\")\n", |
| 281 | + "print(f\"Sentiment: {result.meta.get('gpt_sentiment', 'Not processed')}\")\n", |
| 282 | + "print()\n" |
| 283 | + ] |
| 284 | + }, |
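|  | + { 
|  | + "cell_type": "markdown", 
|  | + "id": "prompt-variants-md", 
|  | + "metadata": {}, 
|  | + "source": [ 
|  | + "A common step in prompt development is trying a few prompt variants on the same input before committing to one. The sketch below is illustrative: it assumes an utterance-level transformer, and the prompt wording and metadata names are placeholder choices, not part of the transformer's API.\n" 
|  | + ] 
|  | + }, 
|  | + { 
|  | + "cell_type": "code", 
|  | + "execution_count": null, 
|  | + "id": "prompt-variants-code", 
|  | + "metadata": {}, 
|  | + "outputs": [], 
|  | + "source": [ 
|  | + "# Illustrative sketch: compare two prompt variants on the same text.\n", 
|  | + "# The prompt wording and metadata names below are placeholder choices.\n", 
|  | + "candidate_prompts = {\n", 
|  | + "    \"label_only\": \"Classify the sentiment of this text as positive, negative, or neutral: {formatted_object}\",\n", 
|  | + "    \"with_reason\": \"Classify the sentiment of this text and briefly explain why: {formatted_object}\",\n", 
|  | + "}\n", 
|  | + "\n", 
|  | + "# transform_single accepts a plain string, as shown above.\n", 
|  | + "sample_text = \"The update fixed some bugs but introduced new ones.\"\n", 
|  | + "\n", 
|  | + "for name, prompt_text in candidate_prompts.items():\n", 
|  | + "    meta_key = f\"sentiment_{name}\"\n", 
|  | + "    variant = LLMPromptTransformer(\n", 
|  | + "        provider=\"gpt\",\n", 
|  | + "        model=\"gpt-4o-mini\",\n", 
|  | + "        object_level=\"utterance\",\n", 
|  | + "        prompt=prompt_text,\n", 
|  | + "        formatter=lambda utt: utt.text,\n", 
|  | + "        metadata_name=meta_key,\n", 
|  | + "        config_manager=config\n", 
|  | + "    )\n", 
|  | + "    result = variant.transform_single(sample_text)\n", 
|  | + "    print(f\"{name}: {result.meta.get(meta_key, 'Not processed')}\")\n" 
|  | + ] 
|  | + }, 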
| 285 | + { |
| 286 | + "cell_type": "code", |
| 287 | + "execution_count": null, |
| 288 | + "id": "93446e06", |
| 289 | + "metadata": {}, |
| 290 | + "outputs": [], |
| 291 | + "source": [ |
| 292 | + "# Example: Testing conversation-level transformation\n", |
| 293 | + "conversation_transformer = LLMPromptTransformer(\n", |
| 294 | + " provider=\"gpt\",\n", |
| 295 | + " model=\"gpt-4o-mini\",\n", |
| 296 | + " object_level=\"conversation\",\n", |
| 297 | + " prompt=\"Summarize this conversation in one sentence: {formatted_object}\",\n", |
| 298 | + " formatter=lambda conv: \" \".join([utt.text for utt in conv.iter_utterances()]),\n", |
| 299 | + " metadata_name=\"conversation_summary\",\n", |
| 300 | + " config_manager=config\n", |
| 301 | + ")\n", |
| 302 | + "\n", |
| 303 | + "test_conv = corpus.get_conversation(corpus.get_conversation_ids()[0])\n", |
| 304 | + "result = conversation_transformer.transform_single(test_conv)\n", |
| 305 | + "print(f\"Conversation ID: {test_conv.id}\")\n", |
| 306 | + "print(f\"Summary: {result.meta.get('conversation_summary', 'Not processed')}\")\n", |
| 307 | + "print()\n" |
| 308 | + ] |
235 | 309 | } |
236 | 310 | ], |
237 | 311 | "metadata": { |