@@ -381,7 +381,7 @@ def test_stream_reasoning_summary() -> None:
     assert isinstance(response_2, AIMessage)


-# TODO: VCR some of these
+@pytest.mark.vcr
 def test_code_interpreter() -> None:
     llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
     llm_with_tools = llm.bind_tools(
@@ -420,8 +420,8 @@ def test_code_interpreter() -> None:
     _ = llm_with_tools.invoke([input_message, full, next_message])


+@pytest.mark.vcr
 def test_mcp_builtin() -> None:
-    pytest.skip()  # TODO: set up VCR
     llm = ChatOpenAI(model="o4-mini", use_responses_api=True)

     llm_with_tools = llm.bind_tools(
@@ -434,10 +434,14 @@ def test_mcp_builtin() -> None:
             }
         ]
     )
-    response = llm_with_tools.invoke(
-        "What transport protocols does the 2025-03-26 version of the MCP spec "
-        "(modelcontextprotocol/modelcontextprotocol) support?"
-    )
+    input_message = {
+        "role": "user",
+        "content": (
+            "What transport protocols does the 2025-03-26 version of the MCP spec "
+            "support?"
+        ),
+    }
+    response = llm_with_tools.invoke([input_message])

     approval_message = HumanMessage(
         [
@@ -453,6 +457,10 @@ def test_mcp_builtin() -> None:
     _ = llm_with_tools.invoke(
         [approval_message], previous_response_id=response.response_metadata["id"]
     )
+    # Zero-data retention (e.g., as below) requires change in output format.
+    # _ = llm_with_tools.invoke(
+    #     [input_message, response, approval_message]
+    # )


 @pytest.mark.vcr()
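
For reference, `@pytest.mark.vcr` is the marker provided by pytest-recording (built on VCR.py): the first run records the test's HTTP traffic to a cassette, and subsequent runs replay it offline, which is why the `pytest.skip()` placeholder can be dropped. A minimal sketch of the pattern, with an illustrative test name and body that are not part of this diff:

```python
# Minimal sketch of the record/replay pattern behind @pytest.mark.vcr,
# assuming pytest-recording (VCR.py) is installed. Illustrative only.
import pytest

from langchain_openai import ChatOpenAI


@pytest.mark.vcr  # records HTTP traffic to a cassette on first run, replays after
def test_simple_invoke() -> None:
    llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
    response = llm.invoke("Say hello.")
    assert response.content
```

Recording typically happens under `--record-mode=once` with a real API key; once the cassette exists, the test runs without network access.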