Skip to content

Commit 3c32b83

Browse files
add basic ruff fixes
Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
1 parent fa3466c commit 3c32b83

File tree

13 files changed

+16
-32
lines changed

13 files changed

+16
-32
lines changed

.pre-commit-config.yaml

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,7 @@ repos:
7979
modelopt/onnx/quantization/ort_patching.py|
8080
modelopt/torch/_deploy/utils/onnx_utils.py|
8181
modelopt/torch/export/transformer_engine.py|
82+
modelopt/torch/puzzletron/anymodel/models/gpt_oss/gpt_oss_pruned_to_mxfp4.py|
8283
modelopt/torch/quantization/export_onnx.py|
8384
modelopt/torch/quantization/plugins/attention.py|
8485
modelopt/torch/speculative/eagle/utils.py|
@@ -100,8 +101,6 @@ repos:
100101
examples/speculative_decoding/main.py|
101102
examples/speculative_decoding/medusa_utils.py|
102103
examples/speculative_decoding/server_generate.py|
103-
examples/puzzletron/evaluation/lm_eval_anymodel.py|
104-
modelopt/torch/puzzletron/anymodel/models/gpt_oss/gpt_oss_pruned_to_mxfp4.py|
105104
experimental/dms/models/qwen3/configuration_qwen3_dms.py|
106105
experimental/dms/models/qwen3/modeling_qwen3_dms.py|
107106
)$

modelopt/torch/puzzletron/anymodel/models/nemotron_h/nemotron_h_model_descriptor.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,6 @@ def init_rotary_embedding(model, runtime):
151151
"""
152152
NemotronH has no positional embeddings
153153
"""
154-
pass
155154

156155
@staticmethod
157156
def input_embedding_name():

modelopt/torch/puzzletron/anymodel/models/nemotron_h_v2/nemotron_h_v2_model_descriptor.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,6 @@ def init_rotary_embedding(model, runtime):
131131
"""
132132
NemotronH has no positional embeddings
133133
"""
134-
pass
135134

136135
@staticmethod
137136
def input_embedding_name():

modelopt/torch/puzzletron/anymodel/models/qwen2/qwen2_model_descriptor.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -144,5 +144,3 @@ class Qwen2FFNIntermediateLayerDescriptor(LlamaFFNIntermediateLayerDescriptor):
144144
145145
Qwen2 uses the same FFN structure as Llama (gate_proj, up_proj, down_proj).
146146
"""
147-
148-
pass

modelopt/torch/puzzletron/anymodel/models/qwen3_vl/qwen3_vl_model_descriptor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -202,7 +202,7 @@ class Qwen3VLExpertRemovalLayerDescriptor(ExpertRemovalLayerDescriptor):
202202
moe_prefix_name: str = "model.language_model.layers.{layer_idx}.mlp"
203203
# Router: Qwen3VLMoeTextTopKRouter has self.weight, no bias
204204
router_weights: List[str] = field(default_factory=lambda: ["gate.weight"])
205-
router_biases: List[str] = field(default_factory=lambda: [])
205+
router_biases: List[str] = field(default_factory=list)
206206
# Fused expert format: Qwen3VLMoeTextExperts stores all experts in single tensors
207207
# with shape [num_experts, ...] instead of separate tensors per expert.
208208
is_fused_experts: bool = True

modelopt/torch/puzzletron/mip/run_puzzle.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
import dataclasses
2121
import enum
2222
import json
23+
import sys
2324
from collections.abc import Hashable, Iterable
2425
from copy import deepcopy
2526
from pathlib import Path
@@ -401,14 +402,14 @@ def _assert_valid_config(args, puzzle_profile):
401402
missing_args = [arg for arg in required_args if arg not in args or getattr(args, arg) is None]
402403
if missing_args:
403404
mprint(f"error: The following arguments are required: {', '.join(missing_args)}")
404-
exit(1)
405+
sys.exit(1)
405406

406407
# Make sure we have specified subblock_stats_args
407408
if "subblock_stats_args" not in args and "subblock_stats_args" not in puzzle_profile:
408409
mprint(
409410
"error: Must specify `subblock_stats_arrs` in either puzzle_profile or as a commandline arg."
410411
)
411-
exit(1)
412+
sys.exit(1)
412413

413414
# Make sure we have specified constraints
414415
if (
@@ -420,7 +421,7 @@ def _assert_valid_config(args, puzzle_profile):
420421
mprint(
421422
"error: Must specify either `mip_constraints` or `human_constraints` in one of puzzle_profile or as a commandline argument."
422423
)
423-
exit(1)
424+
sys.exit(1)
424425

425426

426427
def _get_minimal_unique_names(dicts: list[dict]) -> list[str]:

modelopt/torch/puzzletron/mip/utils.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,6 @@
2121
class InfeasibleError(Exception):
2222
"""Exception raised when optimization problem is infeasible."""
2323

24-
pass
25-
2624

2725
def sort_replacements(layer_replacements: list[dict]) -> list[dict]:
2826
"""Sort layer replacements by parent layer indices.

modelopt/torch/puzzletron/sewing_kit/core.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -676,8 +676,6 @@ def forward(
676676
if work is not None:
677677
work.wait()
678678

679-
pass
680-
681679
if len(node.stitches_from) > 0:
682680
assert len(peers) == 1, (
683681
f"Cannot use multiple peers when using RemoteTarget as a source ({peers=})"

modelopt/torch/puzzletron/sewing_kit/utils.py

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,9 @@
1616
from __future__ import annotations
1717

1818
import inspect
19-
from collections.abc import Sequence
2019
from contextlib import contextmanager
2120
from typing import (
21+
TYPE_CHECKING,
2222
Any,
2323
Callable,
2424
ContextManager,
@@ -43,6 +43,9 @@
4343
from torch._subclasses import FakeTensor, FakeTensorMode
4444
from typing_extensions import override
4545

46+
if TYPE_CHECKING:
47+
from collections.abc import Sequence
48+
4649
Fn = TypeVar("Fn", bound=Callable)
4750

4851

@@ -61,11 +64,11 @@ def __call__(self, fn: Fn, disable: bool = False) -> Fn: ...
6164

6265

6366
try:
64-
dynamo_skip: DynamoSkip = cast(Any, torch._dynamo.decorators).skip
65-
dynamo_disable: DynamoDisable = cast(Any, torch._dynamo.decorators).disable
67+
dynamo_skip: DynamoSkip = cast("Any", torch._dynamo.decorators).skip
68+
dynamo_disable: DynamoDisable = cast("Any", torch._dynamo.decorators).disable
6669
except:
67-
dynamo_skip: DynamoSkip = cast(Any, torch._dynamo.eval_frame).skip
68-
dynamo_disable: DynamoDisable = cast(Any, torch._dynamo.eval_frame).disable
70+
dynamo_skip: DynamoSkip = cast("Any", torch._dynamo.eval_frame).skip
71+
dynamo_disable: DynamoDisable = cast("Any", torch._dynamo.eval_frame).disable
6972

7073

7174
TModule = TypeVar("TModule", bound=nn.Module)
@@ -264,7 +267,7 @@ def __new__(cls, elem, device) -> MyFakeTensor:
264267
dispatch_device=True,
265268
device_for_backend_keys=device,
266269
)
267-
return cast(MyFakeTensor, self)
270+
return cast("MyFakeTensor", self)
268271

269272
@classmethod
270273
@dynamo_disable

modelopt/torch/puzzletron/subblock_stats/calc_subblock_stats.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,6 @@ def calculate_subblock_stats(
203203
)
204204

205205
if is_calc_runtime:
206-
pass
207206
# TODO: fix
208207
# from puzzle_tools.calc_subblock_runtime import measure_non_block_runtime_ms
209208
# non_block_runtime_ms, embedding_runtime_ms, lm_head_runtime_ms = \

0 commit comments

Comments (0)