Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions .github/actions/free-disk-space/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# Composite action that reclaims disk space on GitHub-hosted runners by
# deleting large preinstalled toolchains and cached Docker images.
# NOTE(review): indentation restored — the source paste had flattened the
# YAML structure, which is invalid for a composite action definition.
name: 'Free Disk Space'
description: 'Frees disk space on the runner'
runs:
  using: "composite"
  steps:
    - name: Print disk space before cleanup
      run: |
        df -h
      shell: bash
    - name: Free Disk Space Linux
      if: runner.os == 'Linux'
      run: |
        # Remove all cached Docker images. xargs is used because quoting the
        # command substitution ("$(docker image ls -aq)") would pass every
        # image ID as one newline-joined argument and make `docker rmi` fail
        # whenever more than one image exists; `xargs -r` also skips the call
        # when there are no images at all.
        docker image ls -aq | xargs -r sudo docker rmi >/dev/null 2>&1 || true
        # Delete large preinstalled toolchains we never use in CI.
        # `|| true` keeps the step green if a path is absent on this image.
        sudo rm -rf \
          /usr/share/dotnet /usr/local/lib/android /opt/ghc \
          /usr/local/share/powershell /usr/share/swift /usr/local/.ghcup \
          /usr/lib/jvm || true
        # aptitude supports the '~n <regex>' package-name search used below.
        sudo apt install aptitude -y >/dev/null 2>&1
        sudo aptitude purge '~n ^mysql' -f -y >/dev/null 2>&1
        sudo aptitude purge '~n ^dotnet' -f -y >/dev/null 2>&1
        sudo apt-get autoremove -y >/dev/null 2>&1
        sudo apt-get autoclean -y >/dev/null 2>&1
      shell: bash
    - name: Print disk space after cleanup
      run: |
        df -h
      shell: bash
2 changes: 2 additions & 0 deletions .github/workflows/quality.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ jobs:
python-version: ['3.10', '3.11', '3.12'] # Need to add 3.13 once we resolve outlines issues.
steps:
- uses: actions/checkout@v4
- name: Free disk space
uses: ./.github/actions/free-disk-space
- name: Install uv and set the python version
uses: astral-sh/setup-uv@v5
with:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

from mellea import MelleaSession
from mellea.backends.types import ModelOption
from mellea.stdlib.base import CBlock
from mellea.stdlib.instruction import Instruction

from .._prompt_modules import PromptModule, PromptModuleString
Expand Down Expand Up @@ -114,9 +115,8 @@ def generate( # type: ignore[override]
instruction = Instruction(description=user_prompt, prefix=system_prompt)

try:
gen_result = mellea_session.backend.generate_from_context(
gen_result = mellea_session.act(
action=instruction,
ctx=mellea_session.ctx,
model_options={
ModelOption.TEMPERATURE: 0,
ModelOption.MAX_NEW_TOKENS: max_new_tokens,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -216,9 +216,8 @@ def generate( # type: ignore[override]
instruction = Instruction(description=user_prompt, prefix=system_prompt)

try:
gen_result = mellea_session.backend.generate_from_context(
gen_result = mellea_session.act(
action=instruction,
ctx=mellea_session.ctx,
model_options={
ModelOption.TEMPERATURE: 0,
ModelOption.MAX_NEW_TOKENS: max_new_tokens,
Expand Down
3 changes: 1 addition & 2 deletions cli/decompose/prompt_modules/subtask_list/_subtask_list.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,9 +144,8 @@ def generate(
instruction = Instruction(description=user_prompt, prefix=system_prompt)

try:
gen_result = mellea_session.backend.generate_from_context(
gen_result = mellea_session.act(
action=instruction,
ctx=mellea_session.ctx,
model_options={
ModelOption.TEMPERATURE: 0,
ModelOption.MAX_NEW_TOKENS: max_new_tokens,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -218,9 +218,8 @@ def generate( # type: ignore[override]
instruction = Instruction(description=user_prompt, prefix=system_prompt)

try:
gen_result = mellea_session.backend.generate_from_context(
gen_result = mellea_session.act(
action=instruction,
ctx=mellea_session.ctx,
model_options={
ModelOption.TEMPERATURE: 0,
ModelOption.MAX_NEW_TOKENS: max_new_tokens,
Expand Down
4 changes: 2 additions & 2 deletions docs/examples/mify/rich_document_advanced.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@
# Note: Because the template for a RichDocument just outputs it as markdown,
# the model doesn't really know what to do with it in this context. However, this
# is a useful pattern if you want to use a component with a specified template.
thunk = m.backend.generate_from_context(action=rd, ctx=m.ctx)
thunk = m.act(action=rd)
print(thunk.value) # > - user: What is the primary goal of the GLTR tool...

# 5. The class is opinionated and outputs the document as markdown to the model (like in the initial example).
Expand Down Expand Up @@ -87,7 +87,7 @@ def from_document_file(
rds.format_for_llm().args
) # > {'titles': ['GLTR: Statistical Detection and Visualization of Generated Text', 'Abstract', ..., 'References']}

thunk = m.backend.generate_from_context(action=rds, ctx=m.ctx)
thunk = m.act(action=rds)
print(thunk.value) # > The document appears to be an academic research paper...

# 6. We can also pass this document as grounding context to an instruction.
Expand Down
3 changes: 1 addition & 2 deletions mellea/backends/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,10 +41,9 @@ def generate_from_context(
*,
format: type[BaseModelSubclass] | None = None,
model_options: dict | None = None,
generate_logs: list[GenerateLog] | None = None,
tool_calls: bool = False,
) -> ModelOutputThunk: # i.e., ContextDiff
"""Generates a model output from a context. May not mutate the context.
"""Generates a model output from a context. May not mutate the context. This must be called from a running event loop as it creates a task to run the generation request.

Args:
action: The last item of the context should be passed in as an `action` instead of as part of the `ctx`. See `docs/dev/generate_signature_decisions.md`.
Expand Down
6 changes: 3 additions & 3 deletions mellea/backends/aloras/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import abc

from mellea.stdlib.base import CBlock
from mellea.stdlib.base import CBlock, ModelOutputThunk


class Alora(abc.ABC):
Expand All @@ -24,8 +24,8 @@ def __init__(self, name: str):
self.name: str = name

@abc.abstractmethod
def generate_using_strings(self, *args, **kwargs) -> str:
"""Generates from the ALora using raw strings as the interface for both inputs and outputs.
def generate_using_strings(self, *args, **kwargs) -> ModelOutputThunk:
"""Generates from the ALora using raw strings as the interface for inputs. In most cases, must be run from a running event loop.

This has a generic signature because each aLoRA has different parameters depending on its functionality and how it gets called.
"""
Expand Down
Loading