26 | 26 |
27 | 27 | @t.runtime_checkable |
28 | 28 | class LazyGenerator(t.Protocol): |
29 | | - def __call__(self) -> type[Generator]: |
30 | | - ... |
| 29 | + def __call__(self) -> type[Generator]: ... |
31 | 30 |
32 | 31 |
33 | 32 | g_providers: dict[str, type[Generator] | LazyGenerator] = {} |
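
For context, here is a minimal self-contained sketch of how a `@t.runtime_checkable` protocol lets the `g_providers` registry hold either a concrete `Generator` subclass or a zero-argument factory. The stand-in `Generator` class, the `resolve_provider` helper, and the `lazy_demo` entry are hypothetical illustrations, not part of the library; only `LazyGenerator` and `g_providers` come from the diff above.

```python
import typing as t


class Generator:
    """Stand-in for the real Generator base class."""


@t.runtime_checkable
class LazyGenerator(t.Protocol):
    def __call__(self) -> type[Generator]: ...


g_providers: dict[str, type[Generator] | LazyGenerator] = {}


def resolve_provider(name: str) -> type[Generator]:
    """Hypothetical helper: return a concrete class, calling the factory if needed."""
    provider = g_providers[name]
    if isinstance(provider, type):  # already a concrete Generator subclass
        return provider
    if isinstance(provider, LazyGenerator):
        # A runtime_checkable isinstance() only verifies that __call__ exists,
        # not its signature, so the concrete-class check above must come first.
        return provider()
    raise TypeError(f"unsupported provider for {name!r}")


g_providers["lazy_demo"] = lambda: Generator  # backend resolved on first use
assert resolve_provider("lazy_demo") is Generator
```

Accepting a callable alongside concrete classes is presumably what makes the registry "lazy": an optional backend can be imported when first requested rather than at module import time.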
@@ -382,16 +381,14 @@ def chat( |
382 | 381 | self, |
383 | 382 | messages: t.Sequence[MessageDict], |
384 | 383 | params: GenerateParams | None = None, |
385 | | - ) -> ChatPipeline: |
386 | | - ... |
| 384 | + ) -> ChatPipeline: ... |
387 | 385 |
388 | 386 | @t.overload |
389 | 387 | def chat( |
390 | 388 | self, |
391 | 389 | messages: t.Sequence[Message] | MessageDict | Message | str | None = None, |
392 | 390 | params: GenerateParams | None = None, |
393 | | - ) -> ChatPipeline: |
394 | | - ... |
| 391 | + ) -> ChatPipeline: ... |
395 | 392 |
396 | 393 | def chat( |
397 | 394 | self, |
@@ -460,17 +457,15 @@ def chat( |
460 | 457 | generator: Generator, |
461 | 458 | messages: t.Sequence[MessageDict], |
462 | 459 | params: GenerateParams | None = None, |
463 | | -) -> ChatPipeline: |
464 | | - ... |
| 460 | +) -> ChatPipeline: ... |
465 | 461 |
466 | 462 |
467 | 463 | @t.overload |
468 | 464 | def chat( |
469 | 465 | generator: Generator, |
470 | 466 | messages: t.Sequence[Message] | MessageDict | Message | str | None = None, |
471 | 467 | params: GenerateParams | None = None, |
472 | | -) -> ChatPipeline: |
473 | | - ... |
| 468 | +) -> ChatPipeline: ... |
474 | 469 |
475 | 470 |
476 | 471 | def chat( |
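
Both `chat` hunks make the same stylistic change: the `...` placeholder body of each `@t.overload` stub moves onto the signature line, which modern formatters (Black's 2024 style and the Ruff formatter) produce automatically for stub-only bodies. A condensed sketch of the pattern, with simplified, hypothetical types and signatures rather than the library's real ones:

```python
import typing as t


class Message: ...


class ChatPipeline: ...


MessageDict = dict[str, str]


@t.overload
def chat(messages: t.Sequence[MessageDict], params: dict | None = None) -> ChatPipeline: ...
@t.overload
def chat(
    messages: t.Sequence[Message] | Message | str | None = None,
    params: dict | None = None,
) -> ChatPipeline: ...
def chat(
    messages: t.Sequence[MessageDict] | t.Sequence[Message] | Message | str | None = None,
    params: dict | None = None,
) -> ChatPipeline:
    # Single runtime implementation; the stubs above exist only for type checkers.
    return ChatPipeline()
```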
@@ -597,7 +592,7 @@ def get_generator(identifier: str, *, params: GenerateParams | None = None) -> G |
597 | 592 | if "," in model: |
598 | 593 | try: |
599 | 594 | model, kwargs_str = model.split(",", 1) |
600 | | - kwargs = dict(arg.split("=") for arg in kwargs_str.split(",")) |
| 595 | + kwargs = dict(arg.split("=", 1) for arg in kwargs_str.split(",")) |
601 | 596 | except Exception as e: |
602 | 597 | raise InvalidModelSpecifiedError(identifier) from e |
603 | 598 |
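
The one behavioural fix in this commit is the `maxsplit=1` added when parsing extra generator kwargs out of the model identifier. Without it, any value that itself contains an `=` (API keys, base64 strings, URLs with query parameters) splits into too many fields and `dict()` raises. The values below are illustrative, not taken from the library:

```python
kwargs_str = "api_key=sk-abc==,timeout=30"

# Before: splitting on every "=" turns the first pair into
# ['api_key', 'sk-abc', '', ''], and dict() rejects the 4-element item
# with a ValueError.
# dict(arg.split("=") for arg in kwargs_str.split(","))

# After: split only on the first "=", keeping the rest of the value intact.
kwargs = dict(arg.split("=", 1) for arg in kwargs_str.split(","))
print(kwargs)  # {'api_key': 'sk-abc==', 'timeout': '30'}
```

Values containing a comma would still be broken apart, since the outer `split(",")` is unchanged; only the `=` handling is fixed here.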