Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion backends/arm/test/models/test_llama.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
TosaPipelineMI,
)

from executorch.examples.models.llama.config.llm_config import LlmConfig
from executorch.extension.llm.export.config.llm_config import LlmConfig
from executorch.examples.models.llama.export_llama_lib import (
build_args_parser,
get_llama_model,
Expand Down
2 changes: 1 addition & 1 deletion examples/apple/mps/scripts/mps_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
serialize_from_bundled_program_to_flatbuffer,
)

from executorch.examples.models.llama.config.llm_config import LlmConfig
from executorch.extension.llm.export.config.llm_config import LlmConfig
from executorch.exir import (
EdgeCompileConfig,
EdgeProgramManager,
Expand Down
4 changes: 2 additions & 2 deletions examples/models/llama/TARGETS
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ runtime.python_library(
"//caffe2:torch",
"//executorch/examples/models:model_base",
"//executorch/examples/models/llama:llama_transformer",
"//executorch/examples/models/llama/config:llm_config",
"//executorch/extension/llm/export/config:llm_config",
"//executorch/examples/models:checkpoint",
],
)
Expand Down Expand Up @@ -150,7 +150,7 @@ runtime.python_library(
":source_transformation",
"//ai_codesign/gen_ai/fast_hadamard_transform:fast_hadamard_transform",
"//caffe2:torch",
"//executorch/examples/models/llama/config:llm_config",
"//executorch/extension/llm/export/config:llm_config",
"//executorch/backends/vulkan/_passes:vulkan_passes",
"//executorch/exir/passes:init_mutable_pass",
"//executorch/examples/models:model_base",
Expand Down
15 changes: 1 addition & 14 deletions examples/models/llama/config/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -2,25 +2,12 @@ load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
load("@fbcode_macros//build_defs:python_unittest.bzl", "python_unittest")

def define_common_targets():
runtime.python_library(
name = "llm_config",
srcs = [
"llm_config.py",
],
_is_external_target = True,
base_module = "executorch.examples.models.llama.config",
visibility = [
"//executorch/...",
"@EXECUTORCH_CLIENTS",
],
)

python_unittest(
name = "test_llm_config",
srcs = [
"test_llm_config.py",
],
deps = [
":llm_config",
"//executorch/extension/llm/export/config:llm_config",
],
)
2 changes: 1 addition & 1 deletion examples/models/llama/export_llama_lib.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@

from executorch.devtools.etrecord import generate_etrecord as generate_etrecord_func

from executorch.examples.models.llama.config.llm_config import LlmConfig
from executorch.extension.llm.export.config.llm_config import LlmConfig
from executorch.examples.models.llama.hf_download import (
download_and_convert_hf_checkpoint,
)
Expand Down
2 changes: 1 addition & 1 deletion examples/models/llama/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
get_default_model_resource_dir,
)

from executorch.examples.models.llama.config.llm_config import LlmConfig
from executorch.extension.llm.export.config.llm_config import LlmConfig
from executorch.examples.models.llama.llama_transformer import construct_transformer
from executorch.examples.models.llama.model_args import ModelArgs
from executorch.examples.models.llama.rope import Rope
Expand Down
2 changes: 1 addition & 1 deletion examples/models/llama/runner/eager.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

import torch

from executorch.examples.models.llama.config.llm_config import LlmConfig
from executorch.extension.llm.export.config.llm_config import LlmConfig
from executorch.examples.models.llama.export_llama_lib import (
_prepare_for_llama_export,
build_args_parser as _build_args_parser,
Expand Down
2 changes: 1 addition & 1 deletion examples/models/llama/tests/test_export_llama_lib.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import unittest

from executorch.devtools.backend_debug import get_delegation_info
from executorch.examples.models.llama.config.llm_config import LlmConfig
from executorch.extension.llm.export.config.llm_config import LlmConfig
from executorch.examples.models.llama.export_llama_lib import (
_export_llama,
build_args_parser,
Expand Down
2 changes: 1 addition & 1 deletion examples/models/llama3_2_vision/runner/eager.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from typing import Optional

import torch
from executorch.examples.models.llama.config.llm_config import LlmConfig
from executorch.extension.llm.export.config.llm_config import LlmConfig

from executorch.examples.models.llama.export_llama_lib import _prepare_for_llama_export
from executorch.examples.models.llama.runner.eager import execute_runner
Expand Down
2 changes: 1 addition & 1 deletion examples/models/llava/export_llava.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
get_symmetric_quantization_config,
XNNPACKQuantizer,
)
from executorch.examples.models.llama.config.llm_config import LlmConfig
from executorch.extension.llm.export.config.llm_config import LlmConfig
from executorch.examples.models.llama.export_llama_lib import (
get_quantizer_and_quant_params,
)
Expand Down
4 changes: 4 additions & 0 deletions extension/llm/export/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,11 @@ python -m extension.llm.export.export_llm \

## Configuration Options

For a complete reference of all available configuration options, see the [LlmConfig class definition](config/llm_config.py) which documents all supported parameters for base, model, export, quantization, backend, and debug configurations.

## Further Reading

Expand Down
5 changes: 5 additions & 0 deletions extension/llm/export/config/TARGETS
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
load(":targets.bzl", "define_common_targets")

oncall("executorch")

define_common_targets()
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
import ast
import re
from dataclasses import dataclass, field
from enum import Enum
from typing import ClassVar, List, Optional


Expand Down
15 changes: 15 additions & 0 deletions extension/llm/export/config/targets.bzl
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

def define_common_targets():
runtime.python_library(
name = "llm_config",
srcs = [
"llm_config.py",
],
_is_external_target = True,
base_module = "executorch.extension.llm.export.config",
visibility = [
"//executorch/...",
"@EXECUTORCH_CLIENTS",
],
)
2 changes: 1 addition & 1 deletion extension/llm/export/export_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@

import hydra

from executorch.examples.models.llama.config.llm_config import LlmConfig
from executorch.extension.llm.export.config.llm_config import LlmConfig
from executorch.examples.models.llama.export_llama_lib import export_llama
from hydra.core.config_store import ConfigStore
from omegaconf import OmegaConf
Expand Down
Loading