Skip to content

Commit 9827329

Browse files
Apply isort and black reformatting
Signed-off-by: kevalmorabia97 <kevalmorabia97@users.noreply.github.com>
1 parent d217c65 commit 9827329

File tree

8 files changed

+12
-16
lines changed

8 files changed

+12
-16
lines changed

nemo/collections/llm/modelopt/distill/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
4040

4141
import modelopt.torch.opt as mto
42-
from modelopt.torch.distill import DistillationModel, DistillationLossBalancer
42+
from modelopt.torch.distill import DistillationLossBalancer, DistillationModel
4343

4444

4545
@dataclass

nemo/collections/llm/modelopt/model_utils.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
from typing import TYPE_CHECKING, Callable, Optional, Union
1919

2020
import lightning.pytorch as L
21+
import modelopt.torch.opt as mto
2122
import torch
2223
import torch.nn as nn
2324
from lightning.pytorch.plugins.io.wrapper import _WrappingCheckpointIO
@@ -32,8 +33,6 @@
3233
from nemo.utils.import_utils import safe_import
3334
from nemo.utils.model_utils import unwrap_model
3435

35-
import modelopt.torch.opt as mto
36-
3736
_, HAVE_TE = safe_import("transformer_engine")
3837
if HAVE_TE:
3938
# These custom modelopt specs are a mix of local MCORE and TE specs.

nemo/collections/llm/modelopt/prune/pruner.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,10 @@
1515
from dataclasses import dataclass
1616
from functools import partial
1717

18+
import modelopt.torch.prune as mtp
1819
import pytorch_lightning as pl
1920
from megatron.core import dist_checkpointing
2021

21-
import modelopt.torch.prune as mtp
2222
from nemo import lightning as nl
2323
from nemo.collections import llm
2424
from nemo.lightning.ckpt_utils import ckpt_to_context_subdir

nemo/collections/llm/modelopt/quantization/quantizer.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,9 @@
2121
from pathlib import Path
2222
from typing import TYPE_CHECKING, Optional, Union
2323

24+
import modelopt.torch.export as mte
25+
import modelopt.torch.opt as mto
26+
import modelopt.torch.quantization as mtq
2427
import torch
2528
from datasets import load_dataset
2629
from megatron.core.inference.common_inference_params import CommonInferenceParams
@@ -39,18 +42,13 @@
3942
from nemo.utils.get_rank import is_global_rank_zero
4043
from nemo.utils.model_utils import unwrap_model
4144

42-
import modelopt.torch.export as mte
43-
import modelopt.torch.quantization as mtq
44-
import modelopt.torch.opt as mto
45-
4645
if TYPE_CHECKING:
4746
import lightning.pytorch as pl
4847

4948
from nemo.lightning import Trainer
5049
from nemo.lightning.megatron_parallel import MegatronParallel
5150

5251

53-
5452
QUANT_CFG_CHOICES = get_quant_cfg_choices()
5553
SUPPORTED_DTYPE = [16, "16", "bf16"] # Default precision for non-quantized layers
5654
SUPPORTED_EXPORT_FMT = ["trtllm", "nemo", "hf"]

nemo/collections/llm/modelopt/speculative/model_transform.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,10 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
import torch.nn as nn
16-
1715
import modelopt.torch.opt as mto
1816
import modelopt.torch.speculative as mtsp
17+
import torch.nn as nn
18+
1919
from nemo.collections.llm import GPTModel
2020
from nemo.utils import logging
2121
from nemo.utils.model_utils import unwrap_model

nemo/export/quantize/quantizer.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,10 +17,12 @@
1717
from contextlib import nullcontext
1818
from typing import Callable, Optional
1919

20+
import modelopt.torch.quantization as mtq
2021
import torch
2122
import torch.distributed as dist
2223
from megatron.core import parallel_state
2324
from megatron.core.transformer.module import Float16Module
25+
from modelopt.torch.export import export_tensorrt_llm_checkpoint
2426
from omegaconf.omegaconf import DictConfig, open_dict
2527

2628
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
@@ -29,9 +31,6 @@
2931
from nemo.utils.distributed import temporary_directory
3032
from nemo.utils.model_utils import save_artifacts, unwrap_model
3133

32-
import modelopt.torch.quantization as mtq
33-
from modelopt.torch.export import export_tensorrt_llm_checkpoint
34-
3534
QUANT_CFG_CHOICES = {
3635
"int8": mtq.INT8_DEFAULT_CFG,
3736
"int8_sq": mtq.INT8_SMOOTHQUANT_CFG,

nemo/lightning/fabric/strategies.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
Union,
2929
)
3030

31+
import modelopt.torch.opt as mto
3132
import torch
3233
from lightning.fabric.accelerators import CPUAccelerator
3334
from lightning.fabric.accelerators.accelerator import Accelerator
@@ -60,8 +61,6 @@
6061
from nemo.utils.import_utils import safe_import
6162
from nemo.utils.model_utils import unwrap_model
6263

63-
import modelopt.torch.opt as mto
64-
6564
if TYPE_CHECKING:
6665
from nemo.lightning.pytorch.plugins.data_sampler import DataSampler
6766

scripts/llm/gpt_prune.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@
6666
# isort: off
6767
# Import modelopt first to avoid circular import in 0.35.0
6868
import modelopt.torch.prune # noqa: F401
69+
6970
# isort: on
7071

7172
from nemo.collections import llm

0 commit comments

Comments (0)