Skip to content

Commit b900d6a

Browse files
committed
Temporarily restore Python 3.7 compatibility
I'll roll this back after the release. This reverts commit b86fb1d.
1 parent 3890bc2 commit b900d6a

File tree

6 files changed

+42
-17
lines changed

6 files changed

+42
-17
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
Gptcmd allows you to interact with large language models, such as OpenAI's GPT, efficiently in your terminal. Gptcmd can manage multiple concurrent "threads" of conversation, allowing for free and easy prompt experimentation and iteration. Individual messages can be manipulated, loaded from, and saved to files (both plain text and JSON), and API parameters are fully customizable. In short, Gptcmd is simple yet flexible, useful for both basic conversation and more involved prototyping.
33

44
## Getting started
5-
Gptcmd requires [Python](https://python.org) 3.8.6 or later. It is available on PyPI, and can, for instance, be installed with `pip install gptcmd` at a command line shell. Running `gptcmd` at a shell starts the application. If Python's `bin` or `scripts` directory isn't on your path, you may need to launch the application with a command like `~/.local/bin/gptcmd` (depending on your system configuration). In most cases though, `gptcmd` should "just work".
5+
Gptcmd requires [Python](https://python.org) 3.7.1 or later. Python 3.8.6 or later is strongly recommended and will be required to run future releases. Gptcmd is available on PyPI, and can, for instance, be installed with `pip install gptcmd` at a command line shell. Running `gptcmd` at a shell starts the application. If Python's `bin` or `scripts` directory isn't on your path, you may need to launch the application with a command like `~/.local/bin/gptcmd` (depending on your system configuration). In most cases though, `gptcmd` should "just work".
66

77
If you'd like to use OpenAI models and you don't have an OpenAI account, you'll need to create one and [add some credit](https://platform.openai.com/account/billing/overview). $5 or so goes very far, [especially on `gpt-4o-mini`](#model-selection).
88

pyproject.toml

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,16 +9,20 @@ authors = [
99
]
1010
description = "Command line GPT conversation and experimentation environment"
1111
readme = "README.md"
12-
requires-python = ">=3.8.6"
12+
requires-python = ">=3.7.1"
1313
classifiers = [
1414
"Programming Language :: Python :: 3",
1515
"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
1616
"Operating System :: OS Independent",
1717
]
1818
dependencies = [
19-
"openai>=1.54.0, < 2.0.0",
20-
"tomli>=1.1.0, < 2.0.0 ; python_version < '3.11'",
21-
"backports.strenum>=1.3.1, < 2.0.0 ; python_version < '3.11'",
19+
# openai/openai-python#1784: versions 1.40.0 and above fail to install on Python 3.7
20+
"openai>=1.26.0, <2.0.0 ; python_version >= '3.8'",
21+
"openai>=1.26.0, <1.40.0 ; python_version < '3.8'",
22+
"tomli>=1.1.0 ; python_version < '3.11'",
23+
"backports.strenum>=1.3.1, <2.0.0 ; python_full_version >= '3.8.6' and python_version < '3.11'",
24+
"StrEnum==0.4.15 ; python_full_version < '3.8.6'",
25+
"importlib-metadata>=1.0.0, < 9.0.0 ; python_version < '3.8'"
2226
]
2327
dynamic = ["version"]
2428

@@ -37,5 +41,5 @@ version = {attr = "gptcmd.cli.__version__"}
3741

3842
[tool.black]
3943
line-length = 79
40-
target-version = ['py38']
44+
target-version = ['py37']
4145
preview=true

src/gptcmd/cli.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1133,7 +1133,6 @@ def _edit_interactively(
11331133
mode="w",
11341134
delete=False,
11351135
encoding="utf-8",
1136-
errors="ignore",
11371136
) as cam:
11381137
cam.write(initial_text)
11391138
tempname = cam.name

src/gptcmd/config.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,13 @@
1515
import shlex
1616
import shutil
1717
from importlib import resources
18-
from importlib.metadata import entry_points
1918
from typing import Dict, List, Optional, Type
2019

20+
if sys.version_info >= (3, 8):
21+
from importlib.metadata import entry_points
22+
else:
23+
from importlib_metadata import entry_points
24+
2125
if sys.version_info >= (3, 11):
2226
import tomllib
2327
else:

src/gptcmd/llm/openai.py

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -198,13 +198,18 @@ def complete(self, messages: Sequence[Message]) -> LLMResponse:
198198
)
199199
choice = resp.choices[0]
200200
prompt_tokens = resp.usage.prompt_tokens
201+
# Older versions of the openai package return
202+
# prompt_tokens_details as a dict, and newer versions return it as
203+
# a custom type or None.
204+
# Standardize on a dict representation.
201205
prompt_tokens_details = getattr(
202206
resp.usage, "prompt_tokens_details", None
203207
)
204208
if prompt_tokens_details is None:
205-
cached_prompt_tokens = 0
209+
prompt_tokens_details = {}
206210
else:
207-
cached_prompt_tokens = prompt_tokens_details.cached_tokens
211+
prompt_tokens_details = dict(prompt_tokens_details)
212+
cached_prompt_tokens = prompt_tokens_details.get("cached_tokens", 0)
208213
sampled_tokens = resp.usage.completion_tokens
209214

210215
return LLMResponse(
@@ -320,13 +325,20 @@ def __next__(self):
320325
raise CompletionError(str(e)) from e
321326
if chunk.usage:
322327
prompt_tokens = chunk.usage.prompt_tokens
328+
# Older versions of the openai package return
329+
# prompt_tokens_details as a dict, and newer versions return it as
330+
# a custom type or None.
331+
# Standardize on a dict representation.
323332
prompt_tokens_details = getattr(
324333
chunk.usage, "prompt_tokens_details", None
325334
)
326335
if prompt_tokens_details is None:
327-
cached_prompt_tokens = 0
336+
prompt_tokens_details = {}
328337
else:
329-
cached_prompt_tokens = prompt_tokens_details.cached_tokens
338+
prompt_tokens_details = dict(prompt_tokens_details)
339+
cached_prompt_tokens = prompt_tokens_details.get(
340+
"cached_tokens", 0
341+
)
330342
sampled_tokens = chunk.usage.completion_tokens
331343
self.prompt_tokens = prompt_tokens
332344
self.sampled_tokens = sampled_tokens

src/gptcmd/message.py

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
import sys
1515
from abc import ABC, abstractmethod
1616
from collections.abc import Sequence
17-
from enum import auto
1817
from typing import (
1918
Any,
2019
Callable,
@@ -30,8 +29,10 @@
3029

3130
if sys.version_info >= (3, 11):
3231
from enum import StrEnum
33-
else:
32+
elif sys.version_info >= (3, 8, 6):
3433
from backports.strenum import StrEnum
34+
else:
35+
from strenum import StrEnum
3536

3637

3738
T = TypeVar("T")
@@ -192,9 +193,14 @@ class MessageRole(StrEnum):
192193
Message objects
193194
"""
194195

195-
USER = auto()
196-
ASSISTANT = auto()
197-
SYSTEM = auto()
196+
# Don't use enum.auto() to define member values since its behaviour
197+
# differs between strenum and enum.
198+
# When gptcmd no longer supports Python 3.7, we can migrate to the
199+
# backports.strenum package.
200+
201+
USER = "user"
202+
ASSISTANT = "assistant"
203+
SYSTEM = "system"
198204

199205

200206
@dataclasses.dataclass

0 commit comments

Comments
 (0)