diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 299caee..ce6edd8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,3 +11,5 @@ repos: - id: ruff args: [ --fix ] - id: ruff-format +ci: + autoupdate_schedule: quarterly diff --git a/README.md b/README.md index cf314d0..9fa1ec8 100644 --- a/README.md +++ b/README.md @@ -4,17 +4,16 @@ [![Project License](https://img.shields.io/github/license/spyder-ide/langchain-provider)](./LICENSE) [![Join the chat at https://gitter.im/spyder-ide/public](https://badges.gitter.im/spyder-ide/spyder.svg)](https://gitter.im/spyder-ide/public) -[![OpenCollective Backers](https://opencollective.com/spyder/backers/badge.svg?color=blue)](#backers) +[![OpenCollective Backers](https://opencollective.com/spyder/backers/badge.svg?color=blue)](#sponsors) [![OpenCollective Sponsors](https://opencollective.com/spyder/sponsors/badge.svg?color=blue)](#sponsors) ----- +--- # Overview - ## Installation -To use this completions provider you will need to install Spyder 6 (at least 6.0.0a3) +To use this completions provider you will need to install Spyder 6 (at least 6.1.0) To install the provider package from source, you can use `pip` with something like: @@ -24,7 +23,32 @@ Or from PyPI something like: pip install langchain-provider -Also, you need to have a OpenAI API key, which you can get from [here](https://platform.openai.com/signup) and then set it as a environment variable (`OPENAI_API_KEY`). +Also, you need to set the environment variable `OPENAI_API_KEY`. If +you are actually using the OpenAI API you can get it from [here](https://platform.openai.com/signup) +but for usage with, for example, local LLMs (via tools like [LMStudio](https://lmstudio.ai)) +you will need to set it to a corresponding valid value. 
+ +Depending on the API you are using and the configuration options available, defining the +following JSON schema for the model's structured output could be useful: + +```json +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Generated schema for responses", + "type": "object", + "properties": { + "suggestions": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "suggestions" + ] +} +``` ## Preview @@ -32,7 +56,7 @@ Also, you need to have a OpenAI API key, which you can get from [here](https://p ## Configuration -To configure the provider number of suggestions (1 - 10) or the model to use (`gpt-3.5-turbo`, or `gpt4`) you can click on the Langchain status bar and click the `Change provider parameters` menu entry: +To configure the provider's number of suggestions (1 - 10), the model to use and the API URL, you can click on the Langchain status bar and then the `Change provider parameters` menu entry: ![langchain provider config](https://raw.githubusercontent.com/spyder-ide/langchain-provider/master/langchain-provider-config.gif) @@ -68,8 +92,7 @@ and the donations we have received from our users around the world through [Open [Spyder Github](https://github.com/spyder-ide/spyder) -[Troubleshooting Guide and FAQ]( -https://github.com/spyder-ide/spyder/wiki/Troubleshooting-Guide-and-FAQ) +[Troubleshooting Guide and FAQ](https://github.com/spyder-ide/spyder/wiki/Troubleshooting-Guide-and-FAQ) [Development Wiki](https://github.com/spyder-ide/spyder/wiki/Dev:-Index) diff --git a/langchain-provider-config.gif b/langchain-provider-config.gif index 2466c66..4a03b85 100644 Binary files a/langchain-provider-config.gif and b/langchain-provider-config.gif differ diff --git a/langchain-provider.gif b/langchain-provider.gif index 076a0c2..830675b 100644 Binary files a/langchain-provider.gif and b/langchain-provider.gif differ diff --git a/langchain_provider/client.py b/langchain_provider/client.py index 581e448..f61bab6 100644 --- 
a/langchain_provider/client.py +++ b/langchain_provider/client.py @@ -6,18 +6,19 @@ """Langchain completions HTTP client.""" # Standard library imports -import json import logging # Third party imports from langchain_community.chat_models import ChatOpenAI -from langchain.chains import LLMChain -from langchain.prompts.chat import ( +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.prompts import ( ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, ) +from pydantic import BaseModel, Field from qtpy.QtCore import QObject, QThread, Signal, QMutex, Slot +from qtpy.QtGui import QTextCursor # Spyder imports from spyder.plugins.completion.api import CompletionRequestTypes, CompletionItemKind @@ -30,6 +31,12 @@ LANG_ICON_SCALE = 1 +class Suggestions(BaseModel): + """Completion suggestions list.""" + + suggestions: list[str] = Field(description="List of completion suggestions strings") + + class LangchainClient(QObject): sig_response_ready = Signal(int, dict) sig_client_started = Signal() @@ -39,7 +46,7 @@ class LangchainClient(QObject): sig_status_response_ready = Signal((str,), (dict,)) sig_onboarding_response_ready = Signal(str) - def __init__(self, parent, template, model_name, language="python"): + def __init__(self, parent, template, model_name, api_url, language="python"): QObject.__init__(self, parent) self.requests = {} self.language = language @@ -55,6 +62,7 @@ def __init__(self, parent, template, model_name, language="python"): self.template = template self.model_name = model_name + self.api_url = api_url self.chain = None def start(self): @@ -70,14 +78,12 @@ def start(self): llm = ChatOpenAI( temperature=0, model_name=self.model_name, + base_url=self.api_url, ) chat_prompt = ChatPromptTemplate.from_messages( [system_message_prompt, code_message_prompt] ) - chain = LLMChain( - llm=llm, - prompt=chat_prompt, - ) + chain = chat_prompt | llm | JsonOutputParser(pydantic_object=Suggestions) self.chain = chain 
self.sig_client_started.emit() except ValueError as e: @@ -97,9 +103,10 @@ def stop(self): self.thread.wait() self.thread_started = False - def update_configuration(self, model_name, template): + def update_configuration(self, model_name, api_url, template): self.stop() self.model_name = model_name + self.api_url = api_url self.template = template self.start() @@ -110,24 +117,28 @@ def get_status(self, filename): langchain_status = self.model_name self.sig_status_response_ready[str].emit(langchain_status) - def run_chain(self, params=None): - response = None - try: - prevResponse = self.chain.invoke(params)["text"] - if prevResponse[0] == '"': - response = json.loads("{" + prevResponse + "}") - else: - response = json.loads(prevResponse) - return response - except Exception: + def request_completions(self, params): + response = self.chain.invoke(params) + if not isinstance(response, dict) or "suggestions" not in response: + response = {"suggestions": []} self.sig_client_error.emit("No suggestions available") - return {"suggestions": []} - - def send(self, params): - response = None - response = self.run_chain(params=params) return response + def handle_completion_text(self, msg, completion): + codeeditor = msg["response_instance"] + cursor = codeeditor.textCursor() + current_pos = cursor.position() + cursor.movePosition(QTextCursor.StartOfBlock, QTextCursor.KeepAnchor) + current_line = cursor.selectedText() + cursor.setPosition(current_pos) + codeeditor.setTextCursor(cursor) + current_word = msg["current_word"] + if current_word: + current_line = current_line.removesuffix(current_word) + completion_text = completion.removeprefix(current_line).replace("/", "\\/") + + return completion_text + @Slot(dict) def handle_msg(self, message): """Handle one message""" @@ -138,24 +149,24 @@ def handle_msg(self, message): elif msg_type == CompletionRequestTypes.DOCUMENT_DID_CHANGE: self.opened_files[msg["file"]] = msg["text"] elif msg_type == 
CompletionRequestTypes.DOCUMENT_COMPLETION: - response = self.send(self.opened_files[msg["file"]]) + response = self.request_completions(self.opened_files[msg["file"]]) logger.debug(response) if response is None: return {"params": []} spyder_completions = [] completions = response["suggestions"] - if completions is not None: - for i, completion in enumerate(completions): - entry = { - "kind": CompletionItemKind.TEXT, - "label": completion, - "insertText": completion, - "filterText": "", - # Use the returned ordering - "sortText": (0, i), - "documentation": completion, - "provider": LANG_COMPLETION, - "icon": ("langchain", LANG_ICON_SCALE), - } - spyder_completions.append(entry) + for i, completion in enumerate(completions): + completion_text = self.handle_completion_text(msg, completion) + entry = { + "kind": CompletionItemKind.TEXT, + "label": completion, + "insertText": completion_text, + "filterText": "", + # Use the returned ordering + "sortText": (0, i), + "documentation": completion, + "provider": LANG_COMPLETION, + "icon": ("langchain", LANG_ICON_SCALE), + } + spyder_completions.append(entry) self.sig_response_ready.emit(_id, {"params": spyder_completions}) diff --git a/langchain_provider/provider.py b/langchain_provider/provider.py index 396fd1d..fe06f39 100644 --- a/langchain_provider/provider.py +++ b/langchain_provider/provider.py @@ -30,22 +30,29 @@ class LangchainProvider(SpyderCompletionProvider): COMPLETION_PROVIDER_NAME = "langchain" DEFAULT_ORDER = 1 SLOW = True - CONF_VERSION = "1.0.0" + CONF_VERSION = "2.0.0" CONF_DEFAULTS = [ ("suggestions", 4), ("language", "Python"), - ("model_name", "gpt-3.5-turbo"), + ("model_name", "No model"), + ("api_url", "https://api.openai.com/v1"), + ( + "template", + """You are a helpful assistant in completing following {language} code based +on the previous sentence. + +You always give {num_suggestions} suggestions. 
+ +Example : a=3 b=4 print +AI : "suggestions": ["print(a)", "print(b)", "print(a+b)"] +Example : a=3 b=4 c +AI : "suggestions": ["c=a+b", "c=a-b", "c=5"] +Format the output as JSON with the following key: + suggestions + +""", + ), ] - TEMPLATE_PARAM = """You are a helpful assistant in completing following {0} code based - on the previous sentence. - You always complete the code in same line and give {1} suggestions. - Example : a=3 b=4 print - AI : "suggestions": ["print(a)", "print(b)", "print(a+b)"] - Example : a=3 b=4 c - AI : "suggestions": ["c=a+b", "c=a-b", "c=5"] - Format the output as JSON with the following key: - suggestions - """ def __init__(self, parent, config): super().__init__(parent, config) @@ -56,8 +63,10 @@ def __init__(self, parent, config): self.client = LangchainClient( None, model_name=self.get_conf("model_name"), - template=self.TEMPLATE_PARAM.format( - self.get_conf("language"), self.get_conf("suggestions") + api_url=self.get_conf("api_url"), + template=self.get_conf("template").format( + language=self.get_conf("language"), + num_suggestions=self.get_conf("suggestions"), ), ) @@ -128,8 +137,10 @@ def update_langchain_configuration(self, config): return self.client.update_configuration( self.get_conf("model_name"), - self.TEMPLATE_PARAM.format( - self.get_conf("language"), self.get_conf("suggestions") + self.get_conf("api_url"), + self.get_conf("template").format( + language=self.get_conf("language"), + num_suggestions=self.get_conf("suggestions"), ), ) diff --git a/langchain_provider/widgets/config_dialog.py b/langchain_provider/widgets/config_dialog.py index 3bc462e..099fea4 100644 --- a/langchain_provider/widgets/config_dialog.py +++ b/langchain_provider/widgets/config_dialog.py @@ -13,12 +13,13 @@ # Third party imports from qtpy.QtCore import Qt from qtpy.QtWidgets import ( - QComboBox, QDialog, QDialogButtonBox, QFormLayout, - QSpinBox, QHBoxLayout, + QLineEdit, + QSpinBox, + QTextEdit, QVBoxLayout, ) @@ -31,9 +32,10 @@ class 
LangchainConfigDialog(QDialog): def __init__(self, provider, parent=None): super().__init__(parent=parent) - self._provider = provider + self._provider = provider + self.setFixedSize(600, 400) self.setAttribute(Qt.WA_DeleteOnClose) self.setWindowTitle(_("Langchain provider configuration")) self.setModal(True) @@ -45,13 +47,23 @@ def __init__(self, provider, parent=None): self.suggestions_spinbox.setValue(provider.get_conf("suggestions")) model_label_text = _("Model name:") - self.model_combobox = QComboBox() - self.model_combobox.addItems(["gpt-3.5-turbo", "gpt-4"]) - self.model_combobox.setCurrentText(provider.get_conf("model_name")) + self.model_lineedit = QLineEdit() + self.model_lineedit.setText(provider.get_conf("model_name")) + + api_url_label_text = _("API URL:") + self.api_url_lineedit = QLineEdit() + self.api_url_lineedit.setText(provider.get_conf("api_url")) + + template_label_text = _("System prompt:") + self.template_textedit = QTextEdit() + self.template_textedit.setPlainText(provider.get_conf("template")) form_layout = QFormLayout() + form_layout.setFieldGrowthPolicy(QFormLayout.ExpandingFieldsGrow) form_layout.addRow(suggestions_label_text, self.suggestions_spinbox) - form_layout.addRow(model_label_text, self.model_combobox) + form_layout.addRow(model_label_text, self.model_lineedit) + form_layout.addRow(api_url_label_text, self.api_url_lineedit) + form_layout.addRow(template_label_text, self.template_textedit) bbox = QDialogButtonBox( QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Vertical, self @@ -70,6 +82,9 @@ def __init__(self, provider, parent=None): self.suggestions_spinbox.setFocus() def accept(self): - self._provider.set_conf("suggestions", self.suggestions_spinbox.value()) - self._provider.set_conf("model_name", self.model_combobox.currentText()) - super().accept() + if self.model_lineedit.text() and self.api_url_lineedit.text(): + self._provider.set_conf("suggestions", self.suggestions_spinbox.value()) + self._provider.set_conf("model_name", 
self.model_lineedit.text()) + self._provider.set_conf("api_url", self.api_url_lineedit.text()) + self._provider.set_conf("template", self.template_textedit.toPlainText()) + super().accept() diff --git a/pyproject.toml b/pyproject.toml index 72cba11..8cabf03 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,13 +5,21 @@ build-backend = "setuptools.build_meta" [project] name = "langchain-provider" version = "0.2.0.dev0" -description = "Langchain provider for Spyder" +description = "Langchain based completions provider for Spyder" authors = [ {name = "Spyder Project Contributors", email = "spyder.python@gmail.com"}, ] license = {text = "MIT license"} -dependencies = ["langchain", "langchain-community", "openai", "qtawesome", "qtpy", "spyder>=6.0.0a3"] -requires-python = ">= 3.8" +dependencies = [ + "langchain-core>=1.2.14,<1.3", + "langchain-community>=0.4.1,<1.0", + "openai>=2.21.0,<3.0", + "pydantic>=2.12.5", + "qtawesome", + "qtpy", + "spyder>=6.1.0" +] +requires-python = ">= 3.9" readme = "README.md" classifiers = [ "Development Status :: 3 - Alpha", @@ -20,9 +28,11 @@ classifiers = [ "Intended Audience :: Science/Research", "Operating System :: OS Independent", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Topic :: Scientific/Engineering", "Topic :: Text Editors :: Integrated Development Environments (IDE)", ]