Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ def test_controlnet_hunyuandit(self):

if torch_device == "xpu":
expected_slice = np.array(
[0.6376953, 0.84375, 0.58691406, 0.48046875, 0.43652344, 0.5517578, 0.54248047, 0.5644531, 0.48217773]
[0.6948242, 0.89160156, 0.59375, 0.5078125, 0.57910156, 0.6035156, 0.58447266, 0.53564453, 0.52246094]
)
else:
expected_slice = np.array(
Expand Down
11 changes: 8 additions & 3 deletions tests/pipelines/flux/test_pipeline_flux.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
)

from ...testing_utils import (
Expectations,
backend_empty_cache,
nightly,
numpy_cosine_similarity_distance,
Expand Down Expand Up @@ -276,10 +277,14 @@ def test_flux_inference(self):
image = pipe(**inputs).images[0]
image_slice = image[0, :10, :10]
# fmt: off
expected_slice = np.array(
[0.3242, 0.3203, 0.3164, 0.3164, 0.3125, 0.3125, 0.3281, 0.3242, 0.3203, 0.3301, 0.3262, 0.3242, 0.3281, 0.3242, 0.3203, 0.3262, 0.3262, 0.3164, 0.3262, 0.3281, 0.3184, 0.3281, 0.3281, 0.3203, 0.3281, 0.3281, 0.3164, 0.3320, 0.3320, 0.3203],
dtype=np.float32,

expected_slices = Expectations(
{
("cuda", None): np.array([0.3242, 0.3203, 0.3164, 0.3164, 0.3125, 0.3125, 0.3281, 0.3242, 0.3203, 0.3301, 0.3262, 0.3242, 0.3281, 0.3242, 0.3203, 0.3262, 0.3262, 0.3164, 0.3262, 0.3281, 0.3184, 0.3281, 0.3281, 0.3203, 0.3281, 0.3281, 0.3164, 0.3320, 0.3320, 0.3203], dtype=np.float32,),
("xpu", 3): np.array([0.3301, 0.3281, 0.3359, 0.3203, 0.3203, 0.3281, 0.3281, 0.3301, 0.3340, 0.3281, 0.3320, 0.3359, 0.3281, 0.3301, 0.3320, 0.3242, 0.3301, 0.3281, 0.3242, 0.3320, 0.3320, 0.3281, 0.3320, 0.3320, 0.3262, 0.3320, 0.3301, 0.3301, 0.3359, 0.3320], dtype=np.float32,),
}
)
expected_slice = expected_slices.get_expectation()
# fmt: on

max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
Expand Down
50 changes: 25 additions & 25 deletions tests/quantization/gguf/test_gguf.py
Original file line number Diff line number Diff line change
Expand Up @@ -360,33 +360,33 @@ def test_pipeline_inference(self):
{
("xpu", 3): np.array(
[
0.1953125,
0.3125,
0.31445312,
0.13085938,
0.30664062,
0.29296875,
0.11523438,
0.2890625,
0.16796875,
0.27929688,
0.28320312,
0.16601562,
0.3046875,
0.328125,
0.140625,
0.31640625,
0.11328125,
0.27539062,
0.26171875,
0.10742188,
0.26367188,
0.26171875,
0.1484375,
0.2734375,
0.296875,
0.13476562,
0.2890625,
0.30078125,
0.1171875,
0.28125,
0.28125,
0.16015625,
0.31445312,
0.30078125,
0.15625,
0.32421875,
0.12304688,
0.3046875,
0.3046875,
0.17578125,
0.3359375,
0.3203125,
0.16601562,
0.34375,
0.31640625,
0.15429688,
0.328125,
0.31054688,
0.296875,
0.14453125,
0.30859375,
0.2890625,
]
),
("cuda", 7): np.array(
Expand Down
91 changes: 91 additions & 0 deletions tests/single_file/single_file_testing_utils.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import gc
import tempfile
from io import BytesIO

Expand All @@ -9,7 +10,10 @@
from diffusers.models.attention_processor import AttnProcessor

from ..testing_utils import (
backend_empty_cache,
nightly,
numpy_cosine_similarity_distance,
require_torch_accelerator,
torch_device,
)

Expand Down Expand Up @@ -47,6 +51,93 @@ def download_diffusers_config(repo_id, tmpdir):
return path


@nightly
@require_torch_accelerator
class SingleFileModelTesterMixin:
    """Mixin providing common tests for single-file checkpoint loading.

    Subclasses must define:
        model_class: the diffusers model class under test.
        repo_id: hub repo id used for ``from_pretrained``.
        ckpt_path: single-file checkpoint URL/path used for ``from_single_file``.
    Optional attributes:
        subfolder: repo subfolder for ``from_pretrained``.
        torch_dtype: dtype applied to both loading paths.
        alternate_keys_ckpt_paths: checkpoints with altered key layouts.
    """

    def setup_method(self):
        # Free Python and accelerator memory before each test so large models
        # left over from a previous test do not cause OOMs.
        gc.collect()
        backend_empty_cache(torch_device)

    def teardown_method(self):
        gc.collect()
        backend_empty_cache(torch_device)

    def _loading_kwargs(self):
        """Build kwargs for ``from_pretrained`` / ``from_single_file`` from optional class attributes.

        Returns:
            tuple[dict, dict]: ``(pretrained_kwargs, single_file_kwargs)``.
        """
        pretrained_kwargs = {}
        single_file_kwargs = {}

        # `subfolder` only applies to hub (pretrained) loading.
        if getattr(self, "subfolder", None):
            pretrained_kwargs["subfolder"] = self.subfolder

        # dtype must be applied to both paths so their outputs are comparable.
        if getattr(self, "torch_dtype", None):
            pretrained_kwargs["torch_dtype"] = self.torch_dtype
            single_file_kwargs["torch_dtype"] = self.torch_dtype

        return pretrained_kwargs, single_file_kwargs

    def test_single_file_model_config(self):
        """Config loaded from a single-file checkpoint matches the pretrained repo config."""
        pretrained_kwargs, single_file_kwargs = self._loading_kwargs()

        model = self.model_class.from_pretrained(self.repo_id, **pretrained_kwargs)
        model_single_file = self.model_class.from_single_file(self.ckpt_path, **single_file_kwargs)

        # Keys that legitimately differ between the two loading paths.
        PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
        for param_name, param_value in model_single_file.config.items():
            if param_name in PARAMS_TO_IGNORE:
                continue
            assert model.config[param_name] == param_value, (
                f"{param_name} differs between pretrained loading and single file loading"
            )

    def test_single_file_model_parameters(self):
        """State dicts from both loading paths have identical keys, shapes and values."""
        pretrained_kwargs, single_file_kwargs = self._loading_kwargs()

        model = self.model_class.from_pretrained(self.repo_id, **pretrained_kwargs)
        model_single_file = self.model_class.from_single_file(self.ckpt_path, **single_file_kwargs)

        state_dict = model.state_dict()
        state_dict_single_file = model_single_file.state_dict()

        assert set(state_dict.keys()) == set(state_dict_single_file.keys()), (
            "Model parameters keys differ between pretrained and single file loading"
        )

        for key in state_dict.keys():
            param = state_dict[key]
            param_single_file = state_dict_single_file[key]

            assert param.shape == param_single_file.shape, (
                f"Parameter shape mismatch for {key}: "
                f"pretrained {param.shape} vs single file {param_single_file.shape}"
            )

            assert torch.allclose(param, param_single_file, rtol=1e-5, atol=1e-5), (
                f"Parameter values differ for {key}: "
                f"max difference {torch.max(torch.abs(param - param_single_file)).item()}"
            )

    def test_checkpoint_altered_keys_loading(self):
        # Test loading with checkpoints that have altered keys.
        # Loading without raising is the assertion here.
        if not getattr(self, "alternate_keys_ckpt_paths", None):
            return

        _, single_file_kwargs = self._loading_kwargs()

        for ckpt_path in self.alternate_keys_ckpt_paths:
            backend_empty_cache(torch_device)

            model = self.model_class.from_single_file(ckpt_path, **single_file_kwargs)

            # Release the model before loading the next checkpoint to keep
            # accelerator memory bounded.
            del model
            gc.collect()
            backend_empty_cache(torch_device)


class SDSingleFileTesterMixin:
single_file_kwargs = {}

Expand Down
41 changes: 3 additions & 38 deletions tests/single_file/test_lumina2_transformer.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,61 +13,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

from diffusers import (
Lumina2Transformer2DModel,
)

from ..testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin


enable_full_determinism()


@require_torch_accelerator
class Lumina2Transformer2DModelSingleFileTests(unittest.TestCase):
class TestLumina2Transformer2DModelSingleFile(SingleFileModelTesterMixin):
model_class = Lumina2Transformer2DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
alternate_keys_ckpt_paths = [
"https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
]

repo_id = "Alpha-VLLM/Lumina-Image-2.0"

def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)

def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)

def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)

PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)

def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
backend_empty_cache(torch_device)
model = self.model_class.from_single_file(ckpt_path)

del model
gc.collect()
backend_empty_cache(torch_device)
subfolder = "transformer"
32 changes: 2 additions & 30 deletions tests/single_file/test_model_autoencoder_dc_single_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import torch

Expand All @@ -23,38 +21,24 @@
)

from ..testing_utils import (
backend_empty_cache,
enable_full_determinism,
load_hf_numpy,
numpy_cosine_similarity_distance,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin


enable_full_determinism()


@slow
@require_torch_accelerator
class AutoencoderDCSingleFileTests(unittest.TestCase):
class TestAutoencoderDCSingleFile(SingleFileModelTesterMixin):
model_class = AutoencoderDC
ckpt_path = "https://huggingface.co/mit-han-lab/dc-ae-f32c32-sana-1.0/blob/main/model.safetensors"
repo_id = "mit-han-lab/dc-ae-f32c32-sana-1.0-diffusers"
main_input_name = "sample"
base_precision = 1e-2

def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)

def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)

def get_file_format(self, seed, shape):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

Expand All @@ -80,18 +64,6 @@ def test_single_file_inference_same_as_pretrained(self):

assert numpy_cosine_similarity_distance(output_slice_1, output_slice_2) < 1e-4

def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path)

PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between pretrained loading and single file loading"
)

def test_single_file_in_type_variant_components(self):
# `in` variant checkpoints require passing in a `config` parameter
# in order to set the scaling factor correctly.
Expand Down
33 changes: 2 additions & 31 deletions tests/single_file/test_model_controlnet_single_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import torch

Expand All @@ -23,46 +21,19 @@
)

from ..testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_testing_utils import SingleFileModelTesterMixin


enable_full_determinism()


@slow
@require_torch_accelerator
class ControlNetModelSingleFileTests(unittest.TestCase):
class TestControlNetModelSingleFile(SingleFileModelTesterMixin):
model_class = ControlNetModel
ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
repo_id = "lllyasviel/control_v11p_sd15_canny"

def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)

def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)

def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path)

PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)

def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path)

Expand Down
Loading
Loading