Skip to content

Commit a768ad1

Browse files
authored
Merge branch 'main' into transformers-v5-pr
2 parents b34420f + 01de02e commit a768ad1

File tree

5 files changed

+11
-8
lines changed

5 files changed

+11
-8
lines changed

.github/workflows/pr_modular_tests.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ jobs:
117117

118118
- name: Install dependencies
119119
run: |
120-
uv pip install -e ".[quality,test]"
120+
uv pip install -e ".[quality]"
121121
#uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
122122
uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
123123
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps

.github/workflows/pr_tests.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ jobs:
115115

116116
- name: Install dependencies
117117
run: |
118-
uv pip install -e ".[quality,test]"
118+
uv pip install -e ".[quality]"
119119
#uv pip uninstall transformers huggingface_hub && uv pip install --prerelease allow -U transformers@git+https://github.com/huggingface/transformers.git
120120
uv pip uninstall transformers huggingface_hub && uv pip install transformers==4.57.1
121121
uv pip uninstall accelerate && uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps
@@ -192,7 +192,7 @@ jobs:
192192

193193
- name: Install dependencies
194194
run: |
195-
uv pip install -e ".[quality,test]"
195+
uv pip install -e ".[quality]"
196196
197197
- name: Environment
198198
run: |
@@ -245,7 +245,7 @@ jobs:
245245

246246
- name: Install dependencies
247247
run: |
248-
uv pip install -e ".[quality,test]"
248+
uv pip install -e ".[quality]"
249249
# TODO (sayakpaul, DN6): revisit `--no-deps`
250250
uv pip install -U peft@git+https://github.com/huggingface/peft.git --no-deps
251251
uv pip install -U tokenizers

.github/workflows/push_tests_mps.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ jobs:
4141
shell: arch -arch arm64 bash {0}
4242
run: |
4343
${CONDA_RUN} python -m pip install --upgrade pip uv
44-
${CONDA_RUN} python -m uv pip install -e ".[quality,test]"
44+
${CONDA_RUN} python -m uv pip install -e ".[quality]"
4545
${CONDA_RUN} python -m uv pip install torch torchvision torchaudio
4646
${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
4747
${CONDA_RUN} python -m uv pip install transformers --upgrade

src/diffusers/quantizers/gguf/utils.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -516,6 +516,9 @@ def dequantize_gguf_tensor(tensor):
516516

517517
block_size, type_size = GGML_QUANT_SIZES[quant_type]
518518

519+
# Convert to plain tensor to avoid unnecessary __torch_function__ overhead.
520+
tensor = tensor.as_tensor()
521+
519522
tensor = tensor.view(torch.uint8)
520523
shape = _quant_shape_from_byte_shape(tensor.shape, type_size, block_size)
521524

@@ -525,7 +528,7 @@ def dequantize_gguf_tensor(tensor):
525528
dequant = dequant_fn(blocks, block_size, type_size)
526529
dequant = dequant.reshape(shape)
527530

528-
return dequant.as_tensor()
531+
return dequant
529532

530533

531534
class GGUFParameter(torch.nn.Parameter):

tests/models/testing_utils/lora.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -375,7 +375,7 @@ def _check_model_hotswap(
375375
# additionally check if dynamic compilation works.
376376
if different_shapes is not None:
377377
for height, width in different_shapes:
378-
new_inputs_dict = self.prepare_dummy_input(height=height, width=width)
378+
new_inputs_dict = self.get_dummy_inputs(height=height, width=width)
379379
_ = model(**new_inputs_dict)
380380
else:
381381
output0_after = model(**inputs_dict)["sample"]
@@ -390,7 +390,7 @@ def _check_model_hotswap(
390390
with torch.inference_mode():
391391
if different_shapes is not None:
392392
for height, width in different_shapes:
393-
new_inputs_dict = self.prepare_dummy_input(height=height, width=width)
393+
new_inputs_dict = self.get_dummy_inputs(height=height, width=width)
394394
_ = model(**new_inputs_dict)
395395
else:
396396
output1_after = model(**inputs_dict)["sample"]

0 commit comments

Comments
 (0)