Skip to content

Commit f161783

Browse files
committed
Linting/Formatting of main app files
1 parent 60e2a5c commit f161783

File tree

9 files changed

+231
-93
lines changed

9 files changed

+231
-93
lines changed

Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,4 +7,4 @@ run:
77
--queue-size 100
88

99
install:
10-
pip install -e .
10+
pip install -e .

README.md

Lines changed: 40 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,9 @@ The server supports six types of MLX models:
108108
109109
### Flux-Series Image Models
110110

111-
The server supports multiple Flux and Qwen model configurations for advanced image generation and editing:
111+
> **⚠️ Note:** Image generation and editing capabilities require installation of `mflux`: `pip install mlx-openai-server[image-generation]` or `pip install git+https://github.com/cubist38/mflux.git`
112+
113+
The server supports multiple Flux model configurations for advanced image generation and editing:
112114

113115
#### Image Generation Models
114116
- **`flux-schnell`** - Fast generation with 4 default steps, no guidance (best for quick iterations)
@@ -202,6 +204,9 @@ Follow these steps to set up the MLX-powered server:
202204
git clone https://github.com/cubist38/mlx-openai-server.git
203205
cd mlx-openai-server
204206
pip install -e .
207+
208+
# Optional: For image generation/editing support
209+
pip install -e .[image-generation]
205210
```
206211

207212
### Using Conda (Recommended)
@@ -236,6 +241,9 @@ For better environment management and to avoid architecture issues, we recommend
236241
git clone https://github.com/cubist38/mlx-openai-server.git
237242
cd mlx-openai-server
238243
pip install -e .
244+
245+
# Optional: For image generation/editing support
246+
pip install -e .[image-generation]
239247
```
240248

241249
### Optional Dependencies
@@ -253,15 +261,44 @@ pip install mlx-openai-server
253261
- All core API endpoints and functionality
254262

255263
#### Image Generation & Editing Support
256-
The server includes support for image generation and editing capabilities:
264+
For image generation and editing capabilities, install with the image-generation extra:
265+
266+
```bash
267+
# Install with image generation support
268+
pip install mlx-openai-server[image-generation]
269+
```
270+
271+
Or install manually:
272+
```bash
273+
# First install the base server
274+
pip install mlx-openai-server
275+
276+
# Then install mflux for image generation/editing support
277+
pip install git+https://github.com/cubist38/mflux.git
278+
```
257279

258-
**Additional features:**
280+
**Additional features with mflux:**
259281
- Image generation models (`--model-type image-generation`)
260282
- Image editing models (`--model-type image-edit`)
261283
- MLX Flux-series model support
262284
- Qwen Image model support
263285
- LoRA adapter support for fine-tuned generation and editing
264286

287+
#### Enhanced Caching Support
288+
For enhanced caching and performance when working with complex ML models and objects, install with the enhanced-caching extra:
289+
290+
```bash
291+
# Install with enhanced caching support
292+
pip install mlx-openai-server[enhanced-caching]
293+
```
294+
295+
This enables better serialization and caching of objects from:
296+
- spaCy (NLP processing)
297+
- regex (regular expressions)
298+
- tiktoken (tokenization)
299+
- torch (PyTorch tensors and models)
300+
- transformers (Hugging Face models)
301+
265302
#### Whisper Models Support
266303
For whisper models to work properly, you need to install ffmpeg:
267304

app/__init__.py

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1 @@
1-
import os
2-
from .version import __version__
3-
4-
# Suppress transformers warnings
5-
os.environ['TRANSFORMERS_VERBOSITY'] = 'error'
6-
7-
__all__ = ["__version__"]
1+
"""MLX OpenAI Server package."""

app/cli.py

Lines changed: 87 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -5,20 +5,22 @@
55
the ASGI server.
66
"""
77

8+
from __future__ import annotations
9+
810
import asyncio
911
import sys
1012

1113
import click
1214
from loguru import logger
1315

1416
from .config import MLXServerConfig
15-
from .parsers import REASONING_PARSER_MAP, TOOL_PARSER_MAP, UNIFIED_PARSER_MAP
16-
from .message_converters import MESSAGE_CONVERTER_MAP
1717
from .main import start
18+
from .message_converters import MESSAGE_CONVERTER_MAP
19+
from .parsers import REASONING_PARSER_MAP, TOOL_PARSER_MAP, UNIFIED_PARSER_MAP
1820
from .version import __version__
1921

2022

21-
class UpperChoice(click.Choice):
23+
class UpperChoice(click.Choice[str]):
2224
"""Case-insensitive choice type that returns uppercase values.
2325
2426
This small convenience subclass normalizes user input in a
@@ -27,7 +29,7 @@ class UpperChoice(click.Choice):
2729
where the internal representation is uppercased.
2830
"""
2931

30-
def normalize_choice(self, choice, ctx):
32+
def normalize_choice(self, choice: str | None, ctx: click.Context | None) -> str | None: # type: ignore[override]
3133
"""Return the canonical uppercase choice or raise BadParameter.
3234
3335
Parameters
@@ -76,20 +78,19 @@ def normalize_choice(self, choice, ctx):
7678
🚀 Version: %(version)s
7779
""",
7880
)
79-
def cli():
81+
def cli() -> None:
8082
"""Top-level Click command group for the MLX server CLI.
8183
8284
Subcommands (such as ``launch``) are registered on this group and
8385
invoked by the console entry point.
8486
"""
85-
pass
8687

8788

88-
@cli.command()
89+
@cli.command(help="Start the MLX OpenAI Server with the supplied flags")
8990
@click.option(
9091
"--model-path",
9192
required=True,
92-
help="Path to the model (required for lm, multimodal, embeddings, image-generation, image-edit, whisper model types). With `image-generation` or `image-edit` model types, it should be the local path to the model.",
93+
help="Path to the model (required for lm, multimodal, embeddings, image-generation, image-edit, whisper model types). Can be a local path or Hugging Face repository ID (e.g., 'black-forest-labs/FLUX.1-dev').",
9394
)
9495
@click.option(
9596
"--model-type",
@@ -121,7 +122,18 @@ def cli():
121122
@click.option(
122123
"--config-name",
123124
default=None,
124-
type=click.Choice(["flux-schnell", "flux-dev", "flux-krea-dev", "flux-kontext-dev", "qwen-image", "qwen-image-edit", "z-image-turbo", "fibo"]),
125+
type=click.Choice(
126+
[
127+
"flux-schnell",
128+
"flux-dev",
129+
"flux-krea-dev",
130+
"flux-kontext-dev",
131+
"qwen-image",
132+
"qwen-image-edit",
133+
"z-image-turbo",
134+
"fibo",
135+
]
136+
),
125137
help="Config name of the model. Only used for image-generation and image-edit models.",
126138
)
127139
@click.option(
@@ -198,37 +210,79 @@ def cli():
198210
help="Enable debug mode for language models. Only works with language models (lm) and multimodal models.",
199211
)
200212
def launch(
201-
model_path,
202-
model_type,
203-
context_length,
204-
port,
205-
host,
206-
max_concurrency,
207-
queue_timeout,
208-
queue_size,
209-
quantize,
210-
config_name,
211-
lora_paths,
212-
lora_scales,
213-
disable_auto_resize,
214-
log_file,
215-
no_log_file,
216-
log_level,
217-
enable_auto_tool_choice,
218-
tool_call_parser,
219-
reasoning_parser,
220-
message_converter,
221-
trust_remote_code,
222-
chat_template_file,
223-
debug,
213+
model_path: str,
214+
model_type: str,
215+
context_length: int,
216+
port: int,
217+
host: str,
218+
max_concurrency: int,
219+
queue_timeout: int,
220+
queue_size: int,
221+
quantize: int,
222+
config_name: str | None,
223+
lora_paths: str | None,
224+
lora_scales: str | None,
225+
disable_auto_resize: bool,
226+
log_file: str | None,
227+
no_log_file: bool,
228+
log_level: str,
229+
enable_auto_tool_choice: bool,
230+
tool_call_parser: str | None,
231+
reasoning_parser: str | None,
232+
message_converter: str | None,
233+
trust_remote_code: bool,
234+
chat_template_file: str | None,
235+
debug: bool,
224236
) -> None:
225237
"""Start the FastAPI/Uvicorn server with the supplied flags.
226238
227239
The command builds a server configuration object using
228240
``MLXServerConfig`` and then calls the async ``start`` routine
229241
which handles the event loop and server lifecycle.
230-
"""
231242
243+
Parameters
244+
----------
245+
model_path : str
246+
Path to the model (required for lm, multimodal, embeddings, image-generation, image-edit, whisper model types).
247+
model_type : str
248+
Type of model to run (lm, multimodal, image-generation, image-edit, embeddings, whisper).
249+
context_length : int
250+
Context length for language models.
251+
port : int
252+
Port to run the server on.
253+
host : str
254+
Host to run the server on.
255+
max_concurrency : int
256+
Maximum number of concurrent requests.
257+
queue_timeout : int
258+
Request timeout in seconds.
259+
queue_size : int
260+
Maximum queue size for pending requests.
261+
quantize : int
262+
Quantization level for the model.
263+
config_name : str or None
264+
Config name of the model.
265+
lora_paths : str or None
266+
Path to the LoRA file(s).
267+
lora_scales : str or None
268+
Scale factor for the LoRA file(s).
269+
disable_auto_resize : bool
270+
Disable automatic model resizing.
271+
log_file : str or None
272+
Path to log file.
273+
no_log_file : bool
274+
Disable file logging entirely.
275+
log_level : str
276+
Set the logging level.
277+
enable_auto_tool_choice : bool
278+
Enable automatic tool choice.
279+
tool_call_parser : str or None
280+
Specify tool call parser to use.
281+
reasoning_parser : str or None
282+
Specify reasoning parser to use.
283+
trust_remote_code : bool
284+
Enable trust_remote_code when loading models.
285+
"""
232286
args = MLXServerConfig(
233287
model_path=model_path,
234288
model_type=model_type,

app/config.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -49,15 +49,14 @@ class MLXServerConfig:
4949
lora_paths_str: str | None = None
5050
lora_scales_str: str | None = None
5151

52-
def __post_init__(self):
52+
def __post_init__(self) -> None:
5353
"""Normalize certain CLI fields after instantiation.
5454
5555
- Convert comma-separated ``lora_paths`` and ``lora_scales`` into
5656
lists when provided.
5757
- Apply small model-type-specific defaults for ``config_name``
5858
and emit warnings when values appear inconsistent.
5959
"""
60-
6160
# Process comma-separated LoRA paths and scales into lists (or None)
6261
if self.lora_paths_str:
6362
self.lora_paths = [p.strip() for p in self.lora_paths_str.split(",") if p.strip()]
@@ -76,11 +75,9 @@ def __post_init__(self):
7675
# image-edit model types. If missing for those types, set defaults.
7776
if self.config_name and self.model_type not in ["image-generation", "image-edit"]:
7877
logger.warning(
79-
"Config name parameter '%s' provided but model type is '%s'. "
78+
f"Config name parameter '{self.config_name}' provided but model type is '{self.model_type}'. "
8079
"Config name is only used with image-generation "
81-
"and image-edit models.",
82-
self.config_name,
83-
self.model_type,
80+
"and image-edit models."
8481
)
8582
elif self.model_type == "image-generation" and not self.config_name:
8683
logger.warning(

app/main.py

Lines changed: 20 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -27,13 +27,19 @@
2727
from .version import __version__
2828

2929

30-
def print_startup_banner(config_args):
31-
"""Log a compact startup banner describing the selected config.
30+
def print_startup_banner(config_args: MLXServerConfig) -> None:
31+
"""
32+
Log a compact startup banner describing the selected config.
3233
3334
The function emits human-friendly log messages that summarize the
3435
runtime configuration (model path/type, host/port, concurrency,
3536
LoRA settings, and logging options). Intended for the user-facing
3637
startup output only.
38+
39+
Parameters
40+
----------
41+
config_args : MLXServerConfig
42+
Configuration object containing runtime settings to display.
3743
"""
3844
logger.info("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
3945
logger.info(f"✨ MLX Server v{__version__} Starting ✨")
@@ -80,12 +86,18 @@ def print_startup_banner(config_args):
8086

8187

8288
async def start(config: MLXServerConfig) -> None:
83-
"""Run the ASGI server using the provided configuration.
89+
"""
90+
Run the ASGI server using the provided configuration.
8491
8592
This coroutine wires the configuration into the server setup
8693
routine, logs progress, and starts the Uvicorn server. It handles
8794
KeyboardInterrupt and logs any startup failures before exiting the
8895
process with a non-zero code.
96+
97+
Parameters
98+
----------
99+
config : MLXServerConfig
100+
Configuration object for server setup.
89101
"""
90102
try:
91103
# Display startup information
@@ -100,19 +112,20 @@ async def start(config: MLXServerConfig) -> None:
100112
except KeyboardInterrupt:
101113
logger.info("Server shutdown requested by user. Exiting...")
102114
except Exception as e:
103-
logger.error(f"Server startup failed: {str(e)}")
115+
logger.error(f"Server startup failed. {type(e).__name__}: {e}")
104116
sys.exit(1)
105117

106118

107-
def main():
108-
"""Normalize process args and dispatch to the Click CLI.
119+
def main() -> None:
120+
"""
121+
Normalize process args and dispatch to the Click CLI.
109122
110123
This helper gathers command-line arguments, inserts the "launch"
111124
subcommand when a subcommand is omitted for backwards compatibility,
112125
and delegates execution to :func:`app.cli.cli` through
113126
``cli.main``.
114127
"""
115-
from .cli import cli
128+
from .cli import cli # noqa: PLC0415
116129

117130
args = [str(x) for x in sys.argv[1:]]
118131
# Keep backwards compatibility: Add 'launch' subcommand if none is provided

0 commit comments

Comments
 (0)