Commit 3b7538d

Backport PR ipython#14846: Add request number and logging (do not use print)

1 parent b30c77c · commit 3b7538d

5 files changed: +125 −7 lines

IPython/core/tests/fake_llm.py

Lines changed: 89 additions & 0 deletions
@@ -0,0 +1,89 @@
+import asyncio
+
+try:
+    from jupyter_ai_magics import BaseProvider
+    from langchain_community.llms import FakeListLLM
+except ImportError:
+
+    class BaseProvider:
+        pass
+
+    class FakeListLLM:
+        pass
+
+
+FIBONACCI = """\
+def fib(n):
+    if n < 2: return n
+    return fib(n - 1) + fib(n - 2)
+"""
+
+
+class FibonacciCompletionProvider(BaseProvider, FakeListLLM):  # type: ignore[misc, valid-type]
+
+    id = "my_provider"
+    name = "My Provider"
+    model_id_key = "model"
+    models = ["model_a"]
+
+    def __init__(self, **kwargs):
+        kwargs["responses"] = ["This fake response will not be used for completion"]
+        kwargs["model_id"] = "model_a"
+        super().__init__(**kwargs)
+
+    async def generate_inline_completions(self, request):
+        raise ValueError("IPython only supports streaming models.")
+
+    async def stream_inline_completions(self, request):
+        from jupyter_ai.completions.models import (
+            InlineCompletionList,
+            InlineCompletionReply,
+        )
+
+        assert request.number > 0
+        token = f"t{request.number}s0"
+        last_line = request.prefix.rstrip("\n").splitlines()[-1]
+
+        if not FIBONACCI.startswith(last_line):
+            return
+
+        yield InlineCompletionReply(
+            list=InlineCompletionList(
+                items=[
+                    {"insertText": "", "isIncomplete": True, "token": token},
+                ]
+            ),
+            reply_to=request.number,
+        )
+
+        async for reply in self._stream(
+            FIBONACCI[len(last_line) :],
+            request.number,
+            token,
+        ):
+            yield reply
+
+    async def _stream(self, sentence, request_number, token, start_with=""):
+        from jupyter_ai.completions.models import InlineCompletionStreamChunk
+
+        suggestion = start_with
+
+        for fragment in sentence.split(" "):
+            await asyncio.sleep(0.05)
+            if suggestion:
+                suggestion += " "
+            suggestion += fragment
+            yield InlineCompletionStreamChunk(
+                type="stream",
+                response={"insertText": suggestion, "token": token},
+                reply_to=request_number,
+                done=False,
+            )
+
+        # finally, send a message confirming that we are done
+        yield InlineCompletionStreamChunk(
+            type="stream",
+            response={"insertText": suggestion, "token": token},
+            reply_to=request_number,
+            done=True,
+        )
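
For orientation (not part of the commit), here is a minimal driver for the fake provider above. It assumes `jupyter_ai` is installed, and it stubs the request object with the only two attributes the provider reads, `number` and `prefix`; the import path is hypothetical and depends on where `fake_llm.py` sits on `sys.path`.

    import asyncio
    from types import SimpleNamespace

    from fake_llm import FibonacciCompletionProvider  # hypothetical import path

    async def main():
        provider = FibonacciCompletionProvider()
        # Stand-in for jupyter_ai's InlineCompletionRequest; the provider
        # only touches request.number and request.prefix.
        request = SimpleNamespace(number=1, prefix="def fib")
        async for reply in provider.stream_inline_completions(request):
            print(type(reply).__name__, "reply_to:", reply.reply_to)

    asyncio.run(main())

If the provider behaves as written, this prints one InlineCompletionReply followed by a series of InlineCompletionStreamChunk messages that progressively rebuild the FIBONACCI snippet, with the final chunk flagged done=True.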

IPython/terminal/interactiveshell.py

Lines changed: 0 additions & 1 deletion
@@ -513,7 +513,6 @@ def _set_autosuggestions(self, provider=None):
         name = self.llm_prefix_from_history

         if name == "no_prefix":
-            print("set tofun1", self.llm_prefix_from_history)

             def no_prefix(history_manager):
                 return ""

IPython/terminal/shortcuts/auto_suggest.py

Lines changed: 11 additions & 6 deletions
@@ -185,6 +185,7 @@ def __init__(self):
         self.skip_lines = 0
         self._connected_apps = []
         self._llm_provider = None
+        self._request_number = 0

     def reset_history_position(self, _: Buffer):
         self.skip_lines = 0

@@ -350,7 +351,7 @@ async def error_catcher(buffer):
             try:
                 await self._trigger_llm_core(buffer)
             except Exception as e:
-                get_ipython().log.error("error")
+                get_ipython().log.error("error %s", e)
                 raise

         # here we need a cancellable task so we can't just await the error catched

@@ -365,9 +366,8 @@ async def _trigger_llm_core(self, buffer: Buffer):
         provider to stream it's response back to us iteratively setting it as
         the suggestion on the current buffer.

-        Unlike with JupyterAi, as we do not have multiple cell, the cell number
-        is always set to `0`, note that we _could_ set it to a new number each
-        time and ignore threply from past numbers.
+        Unlike with JupyterAi, as we do not have multiple cell, the cell id
+        is always set to `None`.

         We set the prefix to the current cell content, but could also inset the
         rest of the history or even just the non-fail history.

@@ -389,10 +389,12 @@ async def _trigger_llm_core(self, buffer: Buffer):

         hm = buffer.history.shell.history_manager
         prefix = self._llm_prefixer(hm)
-        print(prefix)
+        get_ipython().log.debug("prefix: %s", prefix)

+        self._request_number += 1
+        request_number = self._request_number
         request = jai_models.InlineCompletionRequest(
-            number=0,
+            number=request_number,
             prefix=prefix + buffer.document.text,
             suffix="",
             mime="text/x-python",

@@ -405,6 +407,9 @@ async def _trigger_llm_core(self, buffer: Buffer):
         async for reply_and_chunks in self._llm_provider.stream_inline_completions(
             request
         ):
+            if self._request_number != request_number:
+                # If a new suggestion was requested, skip processing this one.
+                return
             if isinstance(reply_and_chunks, jai_models.InlineCompletionReply):
                 if len(reply_and_chunks.list.items) > 1:
                     raise ValueError(
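
The request-number check above is an instance of a generic stale-stream guard: bump a counter when a new completion is triggered, capture its value locally, and stop consuming the stream once the counter has moved on. A self-contained sketch of the pattern, with illustrative names rather than IPython's:

    import asyncio

    class Suggester:
        def __init__(self):
            self._request_number = 0

        async def stream(self, n):
            # Stand-in for an LLM streaming completion fragments back to us.
            for i in range(5):
                await asyncio.sleep(0.01)
                yield f"req{n}-chunk{i}"

        async def trigger(self):
            # Bump the shared counter and remember which request we are.
            self._request_number += 1
            request_number = self._request_number
            async for chunk in self.stream(request_number):
                if self._request_number != request_number:
                    # A newer request superseded this one; drop the stream.
                    return
                print(chunk)

    async def main():
        s = Suggester()
        # Two overlapping triggers: the first is abandoned as soon as
        # the second bumps the counter.
        await asyncio.gather(s.trigger(), s.trigger())

    asyncio.run(main())

Run concurrently like this, the first trigger stops as soon as the second one bumps the counter, which is exactly how a newer keystroke invalidates an in-flight completion.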

IPython/terminal/tests/test_shortcuts.py

Lines changed: 24 additions & 0 deletions
@@ -7,12 +7,14 @@
     accept_word,
     accept_and_keep_cursor,
     discard,
+    llm_autosuggestion,
     NavigableAutoSuggestFromHistory,
     swap_autosuggestion_up,
     swap_autosuggestion_down,
 )
 from IPython.terminal.shortcuts.auto_match import skip_over
 from IPython.terminal.shortcuts import create_ipython_shortcuts, reset_search_buffer
+from IPython.testing import decorators as dec

 from prompt_toolkit.history import InMemoryHistory
 from prompt_toolkit.buffer import Buffer

@@ -41,6 +43,26 @@ def make_event(text, cursor, suggestion):
     return event


+try:
+    from .fake_llm import FIBONACCI
+except ImportError:
+    FIBONACCI = None
+
+
+@dec.skip_without("jupyter_ai")
+@pytest.mark.asyncio
+async def test_llm_autosuggestion():
+    provider = NavigableAutoSuggestFromHistory()
+    ip = get_ipython()
+    ip.auto_suggest = provider
+    ip.llm_provider_class = "tests.fake_llm.FibonacciCompletionProvider"
+    text = "def fib"
+    event = make_event(text, len(text), "")
+    event.current_buffer.history.shell.history_manager.get_range = Mock(return_value=[])
+    await llm_autosuggestion(event)
+    assert event.current_buffer.suggestion.text == FIBONACCI[len(text) :]
+
+
 @pytest.mark.parametrize(
     "text, suggestion, expected",
     [

@@ -226,6 +248,7 @@ def test_other_providers():
     assert swap_autosuggestion_down(event) is None


+@pytest.mark.asyncio
 async def test_navigable_provider():
     provider = NavigableAutoSuggestFromHistory()
     history = InMemoryHistory(history_strings=["very_a", "very", "very_b", "very_c"])

@@ -278,6 +301,7 @@ def get_suggestion():
     assert get_suggestion().text == "_a"


+@pytest.mark.asyncio
 async def test_navigable_provider_multiline_entries():
     provider = NavigableAutoSuggestFromHistory()
     history = InMemoryHistory(history_strings=["very_a\nvery_b", "very_c"])
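
To exercise the new test locally (it is skipped when `jupyter_ai` cannot be imported, courtesy of the `skip_without` decorator), an invocation along these lines should work:

    pytest IPython/terminal/tests/test_shortcuts.py -k test_llm_autosuggestion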

pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -104,6 +104,7 @@ test = [
 test_extra = [
     "ipython[test]",
     "curio",
+    "jupyter_ai",
     "matplotlib!=3.2.0",
     "nbformat",
     "numpy>=1.23",
