Commit 117a22a

Ananth Subramaniam authored
Renames for consistency (#14)
* Renames for consistency
* cleanup
* cleanup

Signed-off-by: Ananth Subramaniam <ansubramania@nvidia.com>
1 parent bb1e601 commit 117a22a

29 files changed: +62 additions, −76 deletions
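The import renames visible in the hunks below follow one pattern: model and converter modules are pluralized, and the training-loop modules move under nemo_lm.training. A minimal before/after sketch, with paths taken from the diffs (anything not shown in a hunk is assumed unchanged):

# Renames in this commit, as seen in the diff hunks below:
#   nemo_lm.model.*        -> nemo_lm.models.*
#   nemo_lm.converter.*    -> nemo_lm.converters.*
#   nemo_lm.checkpointing  -> nemo_lm.training.checkpointing
#   nemo_lm.config         -> nemo_lm.training.config
#   nemo_lm.state          -> nemo_lm.training.state

# Old imports (invalid after this commit):
#   from nemo_lm.config import ConfigContainer
#   from nemo_lm.model.gpt import GPTConfig

# New imports:
from nemo_lm.training.config import ConfigContainer
from nemo_lm.models.gpt import GPTConfig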
Lines changed: 5 additions & 5 deletions

@@ -28,12 +28,12 @@
 from megatron.core.optimizer import OptimizerConfig
 from megatron.core.transformer.module import MegatronModule

-from nemo_lm.checkpointing import save_checkpoint
-from nemo_lm.config import CheckpointConfig, ConfigContainer, LoggerConfig, TokenizerConfig
-from nemo_lm.model.gpt import GPTConfig, torch_dtype_from_mcore_config
-from nemo_lm.model.t5 import T5Config
-from nemo_lm.state import GlobalState
+from nemo_lm.models.gpt import GPTConfig, torch_dtype_from_mcore_config
+from nemo_lm.models.t5 import T5Config
 from nemo_lm.tokenizers.tokenizer import _HuggingFaceTokenizer
+from nemo_lm.training.checkpointing import save_checkpoint
+from nemo_lm.training.config import CheckpointConfig, ConfigContainer, LoggerConfig, TokenizerConfig
+from nemo_lm.training.state import GlobalState
 from nemo_lm.utils.instantiate_utils import instantiate

 if TYPE_CHECKING:
Lines changed: 3 additions & 3 deletions

@@ -18,9 +18,9 @@

 import torch

-from nemo_lm.converter.common import BaseExporter, BaseImporter, dtype_from_hf
-from nemo_lm.converter.state_transform import TransformFns, apply_transforms, state_transform
-from nemo_lm.model.llama import Llama4Config, Llama31Config, LlamaConfig
+from nemo_lm.converters.common import BaseExporter, BaseImporter, dtype_from_hf
+from nemo_lm.converters.state_transform import TransformFns, apply_transforms, state_transform
+from nemo_lm.models.llama import Llama4Config, Llama31Config, LlamaConfig

 if TYPE_CHECKING:
     from transformers import LlamaConfig as HFLlamaConfig

nemo_lm/data/builders/hf_dataset.py

Lines changed: 1 addition & 1 deletion

@@ -24,10 +24,10 @@
 from datasets import Dataset, DatasetDict, load_dataset
 from tqdm import tqdm

-from nemo_lm.config import FinetuningDatasetConfig
 from nemo_lm.data.builders.finetuning_dataset import FinetuningDatasetBuilder
 from nemo_lm.data.datasets.sft import get_dataset_root
 from nemo_lm.tokenizers.tokenizer import MegatronTokenizer
+from nemo_lm.training.config import FinetuningDatasetConfig
 from nemo_lm.utils.common_utils import print_rank_0

 logger = logging.getLogger(__name__)

nemo_lm/data/loaders.py

Lines changed: 2 additions & 2 deletions

@@ -21,9 +21,9 @@
 from megatron.core.rerun_state_machine import RerunDataIterator
 from torch.utils.data import DataLoader

-from nemo_lm.config import ConfigContainer
 from nemo_lm.data.samplers import build_pretraining_data_loader
-from nemo_lm.state import TrainState
+from nemo_lm.training.config import ConfigContainer
+from nemo_lm.training.state import TrainState
 from nemo_lm.utils.common_utils import print_rank_0
 from nemo_lm.utils.sig_utils import DistributedSignalHandler
nemo_lm/data/utils.py

Lines changed: 2 additions & 2 deletions

@@ -20,10 +20,10 @@
 from megatron.core.datasets.blended_megatron_dataset_config import BlendedMegatronDatasetConfig
 from megatron.core.datasets.gpt_dataset import GPTDataset, MockGPTDataset

-from nemo_lm.config import DataloaderConfig, FinetuningDatasetConfig, GPTDatasetConfig
-from nemo_lm.data.builders.finetuning_dataset import FinetuningDatasetBuilder
+from nemo_lm.data.builders import FinetuningDatasetBuilder
 from nemo_lm.data.builders.hf_dataset import HFDatasetBuilder, HFDatasetConfig
 from nemo_lm.tokenizers.tokenizer import MegatronTokenizer
+from nemo_lm.training.config import DataloaderConfig, FinetuningDatasetConfig, GPTDatasetConfig
 from nemo_lm.utils.common_utils import print_rank_0

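Note the second change in this hunk: FinetuningDatasetBuilder is now imported from the nemo_lm.data.builders package rather than its finetuning_dataset submodule. That only resolves if the package re-exports the symbol; a minimal sketch of the __init__.py this implies (the actual file is not part of the hunks shown on this page):

# nemo_lm/data/builders/__init__.py -- sketch; actual contents not shown here
from nemo_lm.data.builders.finetuning_dataset import FinetuningDatasetBuilder

__all__ = ["FinetuningDatasetBuilder"]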

Lines changed: 2 additions & 2 deletions

@@ -23,8 +23,8 @@
 from megatron.core.fp8_utils import is_float8tensor
 from megatron.core.transformer.module import Float16Module, MegatronModule

-from nemo_lm.model.gpt import GPTConfig
-from nemo_lm.model.t5 import T5Config
+from nemo_lm.models.gpt import GPTConfig
+from nemo_lm.models.t5 import T5Config


 def get_model_from_config(
Lines changed: 2 additions & 2 deletions

@@ -20,8 +20,8 @@
 import torch
 import torch.nn.functional as F

-from nemo_lm.model.gpt import GPTConfig
-from nemo_lm.model.llama4_utils import get_llama4_layer_spec
+from nemo_lm.models.gpt import GPTConfig
+from nemo_lm.models.llama4_utils import get_llama4_layer_spec

 try:
     from megatron.core.transformer.spec_utils import ModuleSpec
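For downstream code that still uses the old paths, a one-off rewrite script is straightforward. The sketch below is hypothetical (not part of this commit); the mapping is taken from the hunks above, and the word-boundary patterns avoid touching the already-renamed nemo_lm.models / nemo_lm.converters paths:

# migrate_imports.py -- hypothetical helper, not part of this commit
import re
from pathlib import Path

# Old-path -> new-path mapping, taken from the diff hunks above.
RENAMES = [
    (re.compile(r"\bnemo_lm\.model\."), "nemo_lm.models."),
    (re.compile(r"\bnemo_lm\.converter\."), "nemo_lm.converters."),
    (re.compile(r"\bnemo_lm\.checkpointing\b"), "nemo_lm.training.checkpointing"),
    (re.compile(r"\bnemo_lm\.config\b"), "nemo_lm.training.config"),
    (re.compile(r"\bnemo_lm\.state\b"), "nemo_lm.training.state"),
]

def migrate(root: str) -> None:
    """Rewrite old nemo_lm import paths in every .py file under root."""
    for path in Path(root).rglob("*.py"):
        original = path.read_text()
        text = original
        for pattern, replacement in RENAMES:
            text = pattern.sub(replacement, text)
        if text != original:
            path.write_text(text)
            print(f"updated {path}")

if __name__ == "__main__":
    migrate(".")

Run it from the root of the downstream project; it edits files in place, so commit or stash before running.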
