
Commit dd14727

Backport PR ipython#14846 on branch 8.x (Add request number and logging (do not use print)) (ipython#14854)
2 parents 5cee6df + 54e08a1 commit dd14727

5 files changed: +125 -7 lines changed

IPython/terminal/interactiveshell.py

Lines changed: 0 additions & 1 deletion

@@ -513,7 +513,6 @@ def _set_autosuggestions(self, provider=None):
         name = self.llm_prefix_from_history

         if name == "no_prefix":
-            print("set tofun1", self.llm_prefix_from_history)

             def no_prefix(history_manager):
                 return ""

IPython/terminal/shortcuts/auto_suggest.py

Lines changed: 11 additions & 6 deletions

@@ -181,6 +181,7 @@ def __init__(self):
         self.skip_lines = 0
         self._connected_apps = []
         self._llm_provider = None
+        self._request_number = 0

     def reset_history_position(self, _: Buffer):
         self.skip_lines = 0

@@ -346,7 +347,7 @@ async def error_catcher(buffer):
             try:
                 await self._trigger_llm_core(buffer)
             except Exception as e:
-                get_ipython().log.error("error")
+                get_ipython().log.error("error %s", e)
                 raise

         # here we need a cancellable task so we can't just await the error caught

@@ -361,9 +362,8 @@ async def _trigger_llm_core(self, buffer: Buffer):
         provider to stream it's response back to us iteratively setting it as
         the suggestion on the current buffer.

-        Unlike with JupyterAi, as we do not have multiple cell, the cell number
-        is always set to `0`, note that we _could_ set it to a new number each
-        time and ignore threply from past numbers.
+        Unlike with JupyterAi, as we do not have multiple cell, the cell id
+        is always set to `None`.

         We set the prefix to the current cell content, but could also inset the
         rest of the history or even just the non-fail history.

@@ -385,10 +385,12 @@ async def _trigger_llm_core(self, buffer: Buffer):

         hm = buffer.history.shell.history_manager
         prefix = self._llm_prefixer(hm)
-        print(prefix)
+        get_ipython().log.debug("prefix: %s", prefix)

+        self._request_number += 1
+        request_number = self._request_number
         request = jai_models.InlineCompletionRequest(
-            number=0,
+            number=request_number,
             prefix=prefix + buffer.document.text,
             suffix="",
             mime="text/x-python",

@@ -401,6 +403,9 @@
         async for reply_and_chunks in self._llm_provider.stream_inline_completions(
             request
         ):
+            if self._request_number != request_number:
+                # If a new suggestion was requested, skip processing this one.
+                return
             if isinstance(reply_and_chunks, jai_models.InlineCompletionReply):
                 if len(reply_and_chunks.list.items) > 1:
                     raise ValueError(
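
The `_request_number` counter above gives each LLM trigger a last-writer-wins guard: every new request takes a fresh number, and a stream still running when a newer request starts sees the mismatch and returns early instead of overwriting the newer suggestion. A minimal standalone sketch of the same pattern (the class and names below are illustrative, not IPython's actual API):

    import asyncio


    class SuggestionStreamer:
        # Illustrative sketch of the stale-request guard; not IPython's class.
        def __init__(self):
            self._request_number = 0

        async def trigger(self, name, chunks):
            # Take a fresh number; any older in-flight stream is now stale.
            self._request_number += 1
            request_number = self._request_number
            async for chunk in chunks:
                if self._request_number != request_number:
                    # A newer request started: drop this stale stream.
                    return
                print(name, "->", chunk)


    async def main():
        async def chunks():
            for word in ["def", "fib(n):"]:
                await asyncio.sleep(0.05)
                yield word

        streamer = SuggestionStreamer()
        # Two overlapping requests: the guard silences the first one.
        await asyncio.gather(
            streamer.trigger("first", chunks()),
            streamer.trigger("second", chunks()),
        )


    asyncio.run(main())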

IPython/terminal/tests/fake_llm.py

Lines changed: 89 additions & 0 deletions

@@ -0,0 +1,89 @@
+import asyncio
+
+try:
+    from jupyter_ai_magics import BaseProvider
+    from langchain_community.llms import FakeListLLM
+except ImportError:
+
+    class BaseProvider:
+        pass
+
+    class FakeListLLM:
+        pass
+
+
+FIBONACCI = """\
+def fib(n):
+    if n < 2: return n
+    return fib(n - 1) + fib(n - 2)
+"""
+
+
+class FibonacciCompletionProvider(BaseProvider, FakeListLLM):  # type: ignore[misc, valid-type]
+
+    id = "my_provider"
+    name = "My Provider"
+    model_id_key = "model"
+    models = ["model_a"]
+
+    def __init__(self, **kwargs):
+        kwargs["responses"] = ["This fake response will not be used for completion"]
+        kwargs["model_id"] = "model_a"
+        super().__init__(**kwargs)
+
+    async def generate_inline_completions(self, request):
+        raise ValueError("IPython only supports streaming models.")
+
+    async def stream_inline_completions(self, request):
+        from jupyter_ai.completions.models import (
+            InlineCompletionList,
+            InlineCompletionReply,
+        )
+
+        assert request.number > 0
+        token = f"t{request.number}s0"
+        last_line = request.prefix.rstrip("\n").splitlines()[-1]
+
+        if not FIBONACCI.startswith(last_line):
+            return
+
+        yield InlineCompletionReply(
+            list=InlineCompletionList(
+                items=[
+                    {"insertText": "", "isIncomplete": True, "token": token},
+                ]
+            ),
+            reply_to=request.number,
+        )
+
+        async for reply in self._stream(
+            FIBONACCI[len(last_line) :],
+            request.number,
+            token,
+        ):
+            yield reply
+
+    async def _stream(self, sentence, request_number, token, start_with=""):
+        from jupyter_ai.completions.models import InlineCompletionStreamChunk
+
+        suggestion = start_with
+
+        for fragment in sentence.split(" "):
+            await asyncio.sleep(0.05)
+            if suggestion:
+                suggestion += " "
+            suggestion += fragment
+            yield InlineCompletionStreamChunk(
+                type="stream",
+                response={"insertText": suggestion, "token": token},
+                reply_to=request_number,
+                done=False,
+            )
+
+        # finally, send a message confirming that we are done
+        yield InlineCompletionStreamChunk(
+            type="stream",
+            response={"insertText": suggestion, "token": token},
+            reply_to=request_number,
+            done=True,
+        )
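
For reference, the fake provider can be driven by hand: `stream_inline_completions` only reads `request.number` and `request.prefix`, so a lightweight stand-in object is enough. A rough sketch, assuming jupyter_ai is installed (the reply models are imported inside the method) and that the provider constructs with no arguments, as the test below relies on:

    import asyncio
    from types import SimpleNamespace

    from IPython.terminal.tests.fake_llm import FibonacciCompletionProvider


    async def main():
        provider = FibonacciCompletionProvider()
        # The fake provider only touches .number and .prefix, so a
        # SimpleNamespace stands in for a real InlineCompletionRequest.
        request = SimpleNamespace(number=1, prefix="def fib")
        async for reply in provider.stream_inline_completions(request):
            print(type(reply).__name__)


    asyncio.run(main())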

IPython/terminal/tests/test_shortcuts.py

Lines changed: 24 additions & 0 deletions

@@ -7,12 +7,14 @@
     accept_word,
     accept_and_keep_cursor,
     discard,
+    llm_autosuggestion,
     NavigableAutoSuggestFromHistory,
     swap_autosuggestion_up,
     swap_autosuggestion_down,
 )
 from IPython.terminal.shortcuts.auto_match import skip_over
 from IPython.terminal.shortcuts import create_ipython_shortcuts, reset_search_buffer
+from IPython.testing import decorators as dec

 from prompt_toolkit.history import InMemoryHistory
 from prompt_toolkit.buffer import Buffer

@@ -41,6 +43,26 @@ def make_event(text, cursor, suggestion):
     return event


+try:
+    from .fake_llm import FIBONACCI
+except ImportError:
+    FIBONACCI = None
+
+
+@dec.skip_without("jupyter_ai")
+@pytest.mark.asyncio
+async def test_llm_autosuggestion():
+    provider = NavigableAutoSuggestFromHistory()
+    ip = get_ipython()
+    ip.auto_suggest = provider
+    ip.llm_provider_class = "IPython.terminal.tests.fake_llm.FibonacciCompletionProvider"
+    text = "def fib"
+    event = make_event(text, len(text), "")
+    event.current_buffer.history.shell.history_manager.get_range = Mock(return_value=[])
+    await llm_autosuggestion(event)
+    assert event.current_buffer.suggestion.text == FIBONACCI[len(text) :]
+
+
 @pytest.mark.parametrize(
     "text, suggestion, expected",
     [

@@ -226,6 +248,7 @@ def test_other_providers():
     assert swap_autosuggestion_down(event) is None


+@pytest.mark.asyncio
 async def test_navigable_provider():
     provider = NavigableAutoSuggestFromHistory()
     history = InMemoryHistory(history_strings=["very_a", "very", "very_b", "very_c"])

@@ -278,6 +301,7 @@ def get_suggestion():
     assert get_suggestion().text == "_a"


+@pytest.mark.asyncio
 async def test_navigable_provider_multiline_entries():
     provider = NavigableAutoSuggestFromHistory()
     history = InMemoryHistory(history_strings=["very_a\nvery_b", "very_c"])
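
The new `test_llm_autosuggestion` is skipped when jupyter_ai is not importable (via `dec.skip_without`). From a source checkout, something along these lines should run it in isolation (exact invocation may vary with how pytest-asyncio is configured in your environment):

    pytest IPython/terminal/tests/test_shortcuts.py -k test_llm_autosuggestion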

pyproject.toml

Lines changed: 1 addition & 0 deletions

@@ -104,6 +104,7 @@ test = [
 test_extra = [
     "ipython[test]",
     "curio",
+    "jupyter_ai",
     "matplotlib!=3.2.0",
     "nbformat",
     "numpy>=1.23",
