From ff0b9a3c4c38cbd8814eddb217973bda335e4b68 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Mon, 9 Jun 2025 20:59:00 -0600 Subject: [PATCH 01/89] working state from hameerabbasi and iddl --- .../pipelines/chroma/pipeline_chroma.py | 1001 +++++++++++++++++ 1 file changed, 1001 insertions(+) create mode 100644 src/diffusers/pipelines/chroma/pipeline_chroma.py diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py new file mode 100644 index 000000000000..50c0c4cedc57 --- /dev/null +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -0,0 +1,1001 @@ +# Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import Any, Callable, Dict, List, Optional, Union + +import numpy as np +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextModel, + CLIPTokenizer, + CLIPVisionModelWithProjection, + T5EncoderModel, + T5TokenizerFast, +) + +from ...image_processor import PipelineImageInput, VaeImageProcessor +from ...loaders import FluxIPAdapterMixin, FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin +from ...models import AutoencoderKL, FluxTransformer2DModel +from ...schedulers import FlowMatchEulerDiscreteScheduler +from ...utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from ...utils.torch_utils import randn_tensor +from ..pipeline_utils import DiffusionPipeline +from .pipeline_output import FluxPipelineOutput + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + XLA_AVAILABLE = True +else: + XLA_AVAILABLE = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> import torch + >>> from diffusers import FluxPipeline + + >>> pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) + >>> pipe.to("cuda") + >>> prompt = "A cat holding a sign that says hello world" + >>> # Depending on the variant being used, the pipeline call will slightly vary. + >>> # Refer to the pipeline documentation for more details. 
+ >>> image = pipe(prompt, num_inference_steps=4, guidance_scale=0.0).images[0] + >>> image.save("flux.png") + ``` +""" + + +def calculate_shift( + image_seq_len, + base_seq_len: int = 256, + max_seq_len: int = 4096, + base_shift: float = 0.5, + max_shift: float = 1.15, +): + m = (max_shift - base_shift) / (max_seq_len - base_seq_len) + b = base_shift - m * base_seq_len + mu = image_seq_len * m + b + return mu + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + r""" + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") + if timesteps is not None: + accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +class FluxPipeline( + DiffusionPipeline, + FluxLoraLoaderMixin, + FromSingleFileMixin, + TextualInversionLoaderMixin, + FluxIPAdapterMixin, +): + r""" + The Flux pipeline for text-to-image generation. 
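+
+    When constructed with `variant="chroma"`, the same pipeline runs the Chroma variant of the
+    architecture, which requires an explicit padding attention mask for the T5 encoder (see
+    `_get_chroma_attn_mask` and the `variant` argument of `__init__` below).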
+ + Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ + + Args: + transformer ([`FluxTransformer2DModel`]): + Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. + scheduler ([`FlowMatchEulerDiscreteScheduler`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + text_encoder_2 ([`T5EncoderModel`]): + [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically + the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). + tokenizer_2 (`T5TokenizerFast`): + Second Tokenizer of class + [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). + """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->transformer->vae" + _optional_components = ["image_encoder", "feature_extractor"] + _callback_tensor_inputs = ["latents", "prompt_embeds"] + + def __init__( + self, + scheduler: FlowMatchEulerDiscreteScheduler, + vae: AutoencoderKL, + text_encoder: CLIPTextModel, + tokenizer: CLIPTokenizer, + text_encoder_2: T5EncoderModel, + tokenizer_2: T5TokenizerFast, + transformer: FluxTransformer2DModel, + image_encoder: CLIPVisionModelWithProjection = None, + feature_extractor: CLIPImageProcessor = None, + variant: str = "flux", + ): + super().__init__() + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + text_encoder_2=text_encoder_2, + tokenizer=tokenizer, + tokenizer_2=tokenizer_2, + transformer=transformer, + scheduler=scheduler, + image_encoder=image_encoder, + feature_extractor=feature_extractor, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 + # Flux latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible + # by the patch size. 
So the vae scale factor is multiplied by the patch size to account for this.
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
+        self.tokenizer_max_length = (
+            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
+        )
+        self.default_sample_size = 128
+        if variant not in {"flux", "chroma"}:
+            raise ValueError("`variant` must be `'flux'` or `'chroma'`.")
+
+        self.variant = variant
+
+    def _get_chroma_attn_mask(self, length: torch.Tensor, max_sequence_length: int) -> torch.Tensor:
+        attention_mask = torch.zeros((length.shape[0], max_sequence_length), dtype=torch.bool, device=length.device)
+        for i, n_tokens in enumerate(length):
+            # unmask the prompt tokens plus one padding token, capped at the maximum sequence length
+            n_tokens = min(int(n_tokens) + 1, max_sequence_length)
+            attention_mask[i, :n_tokens] = True
+        return attention_mask
+
+    def _get_t5_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]] = None,
+        num_images_per_prompt: int = 1,
+        max_sequence_length: int = 512,
+        device: Optional[torch.device] = None,
+        dtype: Optional[torch.dtype] = None,
+    ):
+        device = device or self._execution_device
+        dtype = dtype or self.text_encoder.dtype
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2)
+
+        text_inputs = self.tokenizer_2(
+            prompt,
+            padding="max_length",
+            max_length=max_sequence_length,
+            truncation=True,
+            return_length=(self.variant == "chroma"),
+            return_overflowing_tokens=False,
+            return_tensors="pt",
+        )
+        text_input_ids = text_inputs.input_ids
+        untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
+
+        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+            removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
+            logger.warning(
+                "The following part of your input was truncated because `max_sequence_length` is set to"
+                f" {max_sequence_length} tokens: {removed_text}"
+            )
+
+        prompt_embeds = self.text_encoder_2(
+            text_input_ids.to(device),
+            output_hidden_states=False,
+            attention_mask=(
+                self._get_chroma_attn_mask(text_inputs.length, max_sequence_length).to(device)
+                if self.variant == "chroma"
+                else None
+            ),
+        )[0]
+
+        dtype = self.text_encoder_2.dtype
+        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
+
+        _, seq_len, _ = prompt_embeds.shape
+
+        # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
+        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+        return prompt_embeds
+
+    def _get_clip_prompt_embeds(
+        self,
+        prompt: Union[str, List[str]],
+        num_images_per_prompt: int = 1,
+        device: Optional[torch.device] = None,
+    ):
+        device = device or self._execution_device
+
+        prompt = [prompt] if isinstance(prompt, str) else prompt
+        batch_size = len(prompt)
+
+        if isinstance(self, TextualInversionLoaderMixin):
+            prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+
+        text_inputs = self.tokenizer(
+            prompt,
+            padding="max_length",
+            max_length=self.tokenizer_max_length,
+            truncation=True,
+            return_overflowing_tokens=False,
+            return_length=False,
+            return_tensors="pt",
+        )
+
+        text_input_ids = text_inputs.input_ids
+        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+        if
untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {self.tokenizer_max_length} tokens: {removed_text}" + ) + prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) + + # Use pooled output of CLIPTextModel + prompt_embeds = prompt_embeds.pooler_output + prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) + + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) + prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) + + return prompt_embeds + + def encode_prompt( + self, + prompt: Union[str, List[str]], + prompt_2: Union[str, List[str]], + device: Optional[torch.device] = None, + num_images_per_prompt: int = 1, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + max_sequence_length: int = 512, + lora_scale: Optional[float] = None, + ): + r""" + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in all text-encoders + device: (`torch.device`): + torch device + num_images_per_prompt (`int`): + number of images that should be generated per prompt + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
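+            max_sequence_length (`int`, defaults to 512):
+                Maximum sequence length used when tokenizing `prompt_2` for the T5 text encoder.
+
+        Returns:
+            `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]`: the T5 `prompt_embeds`, the CLIP
+            `pooled_prompt_embeds`, and the `text_ids` position tensor consumed by the transformer.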
+ """ + device = device or self._execution_device + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if self.text_encoder is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder, lora_scale) + if self.text_encoder_2 is not None and USE_PEFT_BACKEND: + scale_lora_layers(self.text_encoder_2, lora_scale) + + prompt = [prompt] if isinstance(prompt, str) else prompt + + if prompt_embeds is None: + prompt_2 = prompt_2 or prompt + prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 + + # We only use the pooled prompt output from the CLIPTextModel + pooled_prompt_embeds = self._get_clip_prompt_embeds( + prompt=prompt, + device=device, + num_images_per_prompt=num_images_per_prompt, + ) + prompt_embeds = self._get_t5_prompt_embeds( + prompt=prompt_2, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + device=device, + ) + + if self.text_encoder is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder, lora_scale) + + if self.text_encoder_2 is not None: + if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(self.text_encoder_2, lora_scale) + + dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype + text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) + + return prompt_embeds, pooled_prompt_embeds, text_ids + + def encode_image(self, image, device, num_images_per_prompt): + dtype = next(self.image_encoder.parameters()).dtype + + if not isinstance(image, torch.Tensor): + image = self.feature_extractor(image, return_tensors="pt").pixel_values + + image = image.to(device=device, dtype=dtype) + image_embeds = self.image_encoder(image).image_embeds + image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) + return image_embeds + + def prepare_ip_adapter_image_embeds( + self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt + ): + image_embeds = [] + if ip_adapter_image_embeds is None: + if not isinstance(ip_adapter_image, list): + ip_adapter_image = [ip_adapter_image] + + if len(ip_adapter_image) != self.transformer.encoder_hid_proj.num_ip_adapters: + raise ValueError( + f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {self.transformer.encoder_hid_proj.num_ip_adapters} IP Adapters." + ) + + for single_ip_adapter_image in ip_adapter_image: + single_image_embeds = self.encode_image(single_ip_adapter_image, device, 1) + image_embeds.append(single_image_embeds[None, :]) + else: + if not isinstance(ip_adapter_image_embeds, list): + ip_adapter_image_embeds = [ip_adapter_image_embeds] + + if len(ip_adapter_image_embeds) != self.transformer.encoder_hid_proj.num_ip_adapters: + raise ValueError( + f"`ip_adapter_image_embeds` must have same length as the number of IP Adapters. Got {len(ip_adapter_image_embeds)} image embeds and {self.transformer.encoder_hid_proj.num_ip_adapters} IP Adapters." 
+ ) + + for single_image_embeds in ip_adapter_image_embeds: + image_embeds.append(single_image_embeds) + + ip_adapter_image_embeds = [] + for single_image_embeds in image_embeds: + single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0) + single_image_embeds = single_image_embeds.to(device=device) + ip_adapter_image_embeds.append(single_image_embeds) + + return ip_adapter_image_embeds + + def check_inputs( + self, + prompt, + prompt_2, + height, + width, + negative_prompt=None, + negative_prompt_2=None, + prompt_embeds=None, + negative_prompt_embeds=None, + pooled_prompt_embeds=None, + negative_pooled_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + max_sequence_length=None, + ): + if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: + logger.warning( + f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" + ) + + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt_2 is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): + raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") + elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): + raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + elif negative_prompt_2 is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and pooled_prompt_embeds is None: + raise ValueError( + "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." + ) + if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: + raise ValueError( + "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." 
+ ) + + if max_sequence_length is not None and max_sequence_length > 512: + raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") + + @staticmethod + def _prepare_latent_image_ids(batch_size, height, width, device, dtype): + latent_image_ids = torch.zeros(height, width, 3) + latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None] + latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :] + + latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape + + latent_image_ids = latent_image_ids.reshape( + latent_image_id_height * latent_image_id_width, latent_image_id_channels + ) + + return latent_image_ids.to(device=device, dtype=dtype) + + @staticmethod + def _pack_latents(latents, batch_size, num_channels_latents, height, width): + latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) + latents = latents.permute(0, 2, 4, 1, 3, 5) + latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) + + return latents + + @staticmethod + def _unpack_latents(latents, height, width, vae_scale_factor): + batch_size, num_patches, channels = latents.shape + + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. + height = 2 * (int(height) // (vae_scale_factor * 2)) + width = 2 * (int(width) // (vae_scale_factor * 2)) + + latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) + latents = latents.permute(0, 3, 1, 4, 2, 5) + + latents = latents.reshape(batch_size, channels // (2 * 2), height, width) + + return latents + + def enable_vae_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.vae.enable_slicing() + + def disable_vae_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_slicing() + + def enable_vae_tiling(self): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger images. + """ + self.vae.enable_tiling() + + def disable_vae_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to + computing decoding in one step. + """ + self.vae.disable_tiling() + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + dtype, + device, + generator, + latents=None, + ): + # VAE applies 8x compression on images but we must also account for packing which requires + # latent height and width to be divisible by 2. 
+        height = 2 * (int(height) // (self.vae_scale_factor * 2))
+        width = 2 * (int(width) // (self.vae_scale_factor * 2))
+
+        shape = (batch_size, num_channels_latents, height, width)
+
+        if latents is not None:
+            latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
+            return latents.to(device=device, dtype=dtype), latent_image_ids
+
+        if isinstance(generator, list) and len(generator) != batch_size:
+            raise ValueError(
+                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+            )
+
+        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+        latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
+
+        latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
+
+        return latents, latent_image_ids
+
+    @property
+    def guidance_scale(self):
+        return self._guidance_scale
+
+    @property
+    def joint_attention_kwargs(self):
+        return self._joint_attention_kwargs
+
+    @property
+    def num_timesteps(self):
+        return self._num_timesteps
+
+    @property
+    def current_timestep(self):
+        return self._current_timestep
+
+    @property
+    def interrupt(self):
+        return self._interrupt
+
+    @torch.no_grad()
+    @replace_example_docstring(EXAMPLE_DOC_STRING)
+    def __call__(
+        self,
+        prompt: Union[str, List[str]] = None,
+        prompt_2: Optional[Union[str, List[str]]] = None,
+        negative_prompt: Union[str, List[str]] = None,
+        negative_prompt_2: Optional[Union[str, List[str]]] = None,
+        true_cfg_scale: float = 1.0,
+        height: Optional[int] = None,
+        width: Optional[int] = None,
+        num_inference_steps: int = 28,
+        sigmas: Optional[List[float]] = None,
+        guidance_scale: float = 3.5,
+        num_images_per_prompt: Optional[int] = 1,
+        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+        latents: Optional[torch.FloatTensor] = None,
+        prompt_embeds: Optional[torch.FloatTensor] = None,
+        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        ip_adapter_image: Optional[PipelineImageInput] = None,
+        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+        negative_ip_adapter_image: Optional[PipelineImageInput] = None,
+        negative_ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+        output_type: Optional[str] = "pil",
+        return_dict: bool = True,
+        joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+        max_sequence_length: int = 512,
+    ):
+        r"""
+        Function invoked when calling the pipeline for generation.
+
+        Args:
+            prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
+                instead.
+            prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` will
+                be used instead.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale`
+                is not greater than `1`).
+            negative_prompt_2 (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+                `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
+            true_cfg_scale (`float`, *optional*, defaults to 1.0):
+                When greater than 1.0 and a `negative_prompt` is provided, enables true classifier-free guidance.
+            height (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
+                The height in pixels of the generated image. This is set to 1024 by default for the best results.
+            width (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
+                The width in pixels of the generated image. This is set to 1024 by default for the best results.
+            num_inference_steps (`int`, *optional*, defaults to 28):
+                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                expense of slower inference.
+            sigmas (`List[float]`, *optional*):
+                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                passed will be used.
+            guidance_scale (`float`, *optional*, defaults to 3.5):
+                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+                `guidance_scale` is defined as `w` of equation 2. of [Imagen
+                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+                1`. A higher guidance scale encourages the model to generate images closely linked to the text
+                `prompt`, usually at the expense of lower image quality.
+            num_images_per_prompt (`int`, *optional*, defaults to 1):
+                The number of images to generate per prompt.
+            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+                to make generation deterministic.
+            latents (`torch.FloatTensor`, *optional*):
+                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+                generation. Can be used to tweak the same generation with different prompts. If not provided, a
+                latents tensor will be generated by sampling using the supplied random `generator`.
+            prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
+                not provided, text embeddings will be generated from `prompt` input argument.
+            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument.
+            ip_adapter_image (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
+            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
+                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
+                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. If not
+                provided, embeddings are computed from the `ip_adapter_image` input argument.
+            negative_ip_adapter_image (`PipelineImageInput`, *optional*):
+                Optional image input to work with IP Adapters.
+            negative_ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
+                Pre-generated image embeddings for IP-Adapter.
It should be a list of length same as number of
+                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. If not
+                provided, embeddings are computed from the `ip_adapter_image` input argument.
+            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+                input argument.
+            output_type (`str`, *optional*, defaults to `"pil"`):
+                The output format of the generated image. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
+            joint_attention_kwargs (`dict`, *optional*):
+                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+                `self.processor` in
+                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+            callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+                `callback_on_step_end_tensor_inputs`.
+            callback_on_step_end_tensor_inputs (`List`, *optional*):
+                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                `._callback_tensor_inputs` attribute of your pipeline class.
+            max_sequence_length (`int`, *optional*, defaults to 512): Maximum sequence length to use with the `prompt`.
+
+        Examples:
+
+        Returns:
+            [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
+            is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
+            images.
+        """
+
+        height = height or self.default_sample_size * self.vae_scale_factor
+        width = width or self.default_sample_size * self.vae_scale_factor
+
+        # 1. Check inputs. Raise error if not correct
+        self.check_inputs(
+            prompt,
+            prompt_2,
+            height,
+            width,
+            negative_prompt=negative_prompt,
+            negative_prompt_2=negative_prompt_2,
+            prompt_embeds=prompt_embeds,
+            negative_prompt_embeds=negative_prompt_embeds,
+            pooled_prompt_embeds=pooled_prompt_embeds,
+            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+            max_sequence_length=max_sequence_length,
+        )
+
+        self._guidance_scale = guidance_scale
+        self._joint_attention_kwargs = joint_attention_kwargs
+        self._current_timestep = None
+        self._interrupt = False
+
+        # 2.
Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + lora_scale = ( + self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None + ) + has_neg_prompt = negative_prompt is not None or ( + negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt + ( + prompt_embeds, + pooled_prompt_embeds, + text_ids, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + if do_true_cfg: + ( + negative_prompt_embeds, + negative_pooled_prompt_embeds, + negative_text_ids, + ) = self.encode_prompt( + prompt=negative_prompt, + prompt_2=negative_prompt_2, + prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=negative_pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + + # 4. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents, latent_image_ids = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 5. Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.get("base_image_seq_len", 256), + self.scheduler.config.get("max_image_seq_len", 4096), + self.scheduler.config.get("base_shift", 0.5), + self.scheduler.config.get("max_shift", 1.15), + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + sigmas=sigmas, + mu=mu, + ) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # handle guidance + if self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) + else: + guidance = None + + if (ip_adapter_image is not None or ip_adapter_image_embeds is not None) and ( + negative_ip_adapter_image is None and negative_ip_adapter_image_embeds is None + ): + negative_ip_adapter_image = np.zeros((width, height, 3), dtype=np.uint8) + negative_ip_adapter_image = [negative_ip_adapter_image] * self.transformer.encoder_hid_proj.num_ip_adapters + + elif (ip_adapter_image is None and ip_adapter_image_embeds is None) and ( + negative_ip_adapter_image is not None or negative_ip_adapter_image_embeds is not None + ): + ip_adapter_image = np.zeros((width, height, 3), dtype=np.uint8) + ip_adapter_image = [ip_adapter_image] * self.transformer.encoder_hid_proj.num_ip_adapters + + if self.joint_attention_kwargs is None: + self._joint_attention_kwargs = {} + + image_embeds = None + negative_image_embeds = None + if ip_adapter_image is not None or ip_adapter_image_embeds is not None: + image_embeds = self.prepare_ip_adapter_image_embeds( + ip_adapter_image, + ip_adapter_image_embeds, + device, + batch_size * 
num_images_per_prompt, + ) + if negative_ip_adapter_image is not None or negative_ip_adapter_image_embeds is not None: + negative_image_embeds = self.prepare_ip_adapter_image_embeds( + negative_ip_adapter_image, + negative_ip_adapter_image_embeds, + device, + batch_size * num_images_per_prompt, + ) + + # 6. Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + self._current_timestep = t + if image_embeds is not None: + self._joint_attention_kwargs["ip_adapter_image_embeds"] = image_embeds + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + + noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + + if do_true_cfg: + if negative_image_embeds is not None: + self._joint_attention_kwargs["ip_adapter_image_embeds"] = negative_image_embeds + neg_noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=guidance, + pooled_projections=negative_pooled_prompt_embeds, + encoder_hidden_states=negative_prompt_embeds, + txt_ids=negative_text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + noise_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype: + if torch.backends.mps.is_available(): + # some platforms (eg. 
apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+                        latents = latents.to(latents_dtype)
+
+                if callback_on_step_end is not None:
+                    callback_kwargs = {}
+                    for k in callback_on_step_end_tensor_inputs:
+                        callback_kwargs[k] = locals()[k]
+                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                    latents = callback_outputs.pop("latents", latents)
+                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+
+                # call the callback, if provided
+                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                    progress_bar.update()
+
+                if XLA_AVAILABLE:
+                    xm.mark_step()
+
+        self._current_timestep = None
+
+        if output_type == "latent":
+            image = latents
+        else:
+            latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+            latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+            image = self.vae.decode(latents, return_dict=False)[0]
+            image = self.image_processor.postprocess(image, output_type=output_type)
+
+        # Offload all models
+        self.maybe_free_model_hooks()
+
+        if not return_dict:
+            return (image,)
+
+        return FluxPipelineOutput(images=image)

From 3c2865c5345f0d1ae506050bd559bdbfeead5e94 Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Mon, 9 Jun 2025 21:02:12 -0600
Subject: [PATCH 02/89] working state from hameerabbasi and iddl (transformer)

---
 .../models/transformers/transformer_chroma.py | 636 ++++++++++++++++++
 1 file changed, 636 insertions(+)
 create mode 100644 src/diffusers/models/transformers/transformer_chroma.py

diff --git a/src/diffusers/models/transformers/transformer_chroma.py b/src/diffusers/models/transformers/transformer_chroma.py
new file mode 100644
index 000000000000..c542bcaaccf6
--- /dev/null
+++ b/src/diffusers/models/transformers/transformer_chroma.py
@@ -0,0 +1,636 @@
+# Copyright 2024 Black Forest Labs, The HuggingFace Team and The InstantX Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
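+
+# Note on the Chroma variant wired up below: rather than learning separate AdaLN modulation
+# projections per block as Flux does, Chroma produces all modulation vectors with one small MLP
+# (`ChromaApproximator`); the "Pruned" norm layers then consume per-block slices of that output
+# (see how `pooled_temb` is sliced in `FluxTransformer2DModel.forward`).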
+
+
+from typing import Any, Dict, Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from ...configuration_utils import ConfigMixin, register_to_config
+from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin
+from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
+from ...utils.import_utils import is_torch_npu_available
+from ...utils.torch_utils import maybe_allow_in_graph
+from ..attention import FeedForward
+from ..attention_processor import (
+    Attention,
+    AttentionProcessor,
+    FluxAttnProcessor2_0,
+    FluxAttnProcessor2_0_NPU,
+    FusedFluxAttnProcessor2_0,
+)
+from ..cache_utils import CacheMixin
+from ..embeddings import (
+    ChromaApproximator,
+    CombinedTimestepGuidanceTextProjEmbeddings,
+    CombinedTimestepTextProjChromaEmbeddings,
+    CombinedTimestepTextProjEmbeddings,
+    FluxPosEmbed,
+)
+from ..modeling_outputs import Transformer2DModelOutput
+from ..modeling_utils import ModelMixin
+from ..normalization import (
+    AdaLayerNormContinuous,
+    AdaLayerNormContinuousPruned,
+    AdaLayerNormZero,
+    AdaLayerNormZeroPruned,
+    AdaLayerNormZeroSingle,
+    AdaLayerNormZeroSinglePruned,
+)
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+INVALID_VARIANT_ERRMSG = "`variant` must be `'flux'` or `'chroma'`."
+
+
+@maybe_allow_in_graph
+class FluxSingleTransformerBlock(nn.Module):
+    def __init__(
+        self,
+        dim: int,
+        num_attention_heads: int,
+        attention_head_dim: int,
+        mlp_ratio: float = 4.0,
+        variant: str = "flux",
+    ):
+        super().__init__()
+        self.mlp_hidden_dim = int(dim * mlp_ratio)
+
+        if variant == "flux":
+            self.norm = AdaLayerNormZeroSingle(dim)
+        elif variant == "chroma":
+            self.norm = AdaLayerNormZeroSinglePruned(dim)
+        else:
+            raise ValueError(INVALID_VARIANT_ERRMSG)
+
+        self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
+        self.act_mlp = nn.GELU(approximate="tanh")
+        self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
+
+        if is_torch_npu_available():
+            deprecation_message = (
+                "Defaulting to FluxAttnProcessor2_0_NPU for NPU devices will be removed. Attention processors "
+                "should be set explicitly using the `set_attn_processor` method."
+ ) + deprecate("npu_processor", "0.34.0", deprecation_message) + processor = FluxAttnProcessor2_0_NPU() + else: + processor = FluxAttnProcessor2_0() + + self.attn = Attention( + query_dim=dim, + cross_attention_dim=None, + dim_head=attention_head_dim, + heads=num_attention_heads, + out_dim=dim, + bias=True, + processor=processor, + qk_norm="rms_norm", + eps=1e-6, + pre_only=True, + ) + + def forward( + self, + hidden_states: torch.Tensor, + temb: torch.Tensor, + image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + ) -> torch.Tensor: + residual = hidden_states + norm_hidden_states, gate = self.norm(hidden_states, emb=temb) + mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) + joint_attention_kwargs = joint_attention_kwargs or {} + attn_output = self.attn( + hidden_states=norm_hidden_states, + image_rotary_emb=image_rotary_emb, + **joint_attention_kwargs, + ) + + hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) + gate = gate.unsqueeze(1) + hidden_states = gate * self.proj_out(hidden_states) + hidden_states = residual + hidden_states + if hidden_states.dtype == torch.float16: + hidden_states = hidden_states.clip(-65504, 65504) + + return hidden_states + + +@maybe_allow_in_graph +class FluxTransformerBlock(nn.Module): + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + qk_norm: str = "rms_norm", + eps: float = 1e-6, + variant: str = "flux", + ): + super().__init__() + + if variant == "flux": + self.norm1 = AdaLayerNormZero(dim) + self.norm1_context = AdaLayerNormZero(dim) + elif variant == "chroma": + self.norm1 = AdaLayerNormZeroPruned(dim) + self.norm1_context = AdaLayerNormZeroPruned(dim) + else: + raise ValueError(INVALID_VARIANT_ERRMSG) + + self.attn = Attention( + query_dim=dim, + cross_attention_dim=None, + added_kv_proj_dim=dim, + dim_head=attention_head_dim, + heads=num_attention_heads, + out_dim=dim, + context_pre_only=False, + bias=True, + processor=FluxAttnProcessor2_0(), + qk_norm=qk_norm, + eps=eps, + ) + + self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) + self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") + + self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) + self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor, + temb: torch.Tensor, + image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + temb_img, temb_txt = temb[:, :6], temb[:, 6:] + norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb_img) + + norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( + encoder_hidden_states, emb=temb_txt + ) + joint_attention_kwargs = joint_attention_kwargs or {} + # Attention. + attention_outputs = self.attn( + hidden_states=norm_hidden_states, + encoder_hidden_states=norm_encoder_hidden_states, + image_rotary_emb=image_rotary_emb, + **joint_attention_kwargs, + ) + + if len(attention_outputs) == 2: + attn_output, context_attn_output = attention_outputs + elif len(attention_outputs) == 3: + attn_output, context_attn_output, ip_attn_output = attention_outputs + + # Process attention outputs for the `hidden_states`. 
+ attn_output = gate_msa.unsqueeze(1) * attn_output + hidden_states = hidden_states + attn_output + + norm_hidden_states = self.norm2(hidden_states) + norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] + + ff_output = self.ff(norm_hidden_states) + ff_output = gate_mlp.unsqueeze(1) * ff_output + + hidden_states = hidden_states + ff_output + if len(attention_outputs) == 3: + hidden_states = hidden_states + ip_attn_output + + # Process attention outputs for the `encoder_hidden_states`. + + context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output + encoder_hidden_states = encoder_hidden_states + context_attn_output + + norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) + norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] + + context_ff_output = self.ff_context(norm_encoder_hidden_states) + encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output + if encoder_hidden_states.dtype == torch.float16: + encoder_hidden_states = encoder_hidden_states.clip(-65504, 65504) + + return encoder_hidden_states, hidden_states + + +class FluxTransformer2DModel( + ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, FluxTransformer2DLoadersMixin, CacheMixin +): + """ + The Transformer model introduced in Flux. + + Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ + + Args: + patch_size (`int`, defaults to `1`): + Patch size to turn the input data into small patches. + in_channels (`int`, defaults to `64`): + The number of channels in the input. + out_channels (`int`, *optional*, defaults to `None`): + The number of channels in the output. If not specified, it defaults to `in_channels`. + num_layers (`int`, defaults to `19`): + The number of layers of dual stream DiT blocks to use. + num_single_layers (`int`, defaults to `38`): + The number of layers of single stream DiT blocks to use. + attention_head_dim (`int`, defaults to `128`): + The number of dimensions to use for each attention head. + num_attention_heads (`int`, defaults to `24`): + The number of attention heads to use. + joint_attention_dim (`int`, defaults to `4096`): + The number of dimensions to use for the joint attention (embedding/channel dimension of + `encoder_hidden_states`). + pooled_projection_dim (`int`, defaults to `768`): + The number of dimensions to use for the pooled projection. + guidance_embeds (`bool`, defaults to `False`): + Whether to use guidance embeddings for guidance-distilled variant of the model. + axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`): + The dimensions to use for the rotary positional embeddings. + """ + + _supports_gradient_checkpointing = True + _no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"] + _skip_layerwise_casting_patterns = ["pos_embed", "norm"] + + @register_to_config + def __init__( + self, + patch_size: int = 1, + in_channels: int = 64, + out_channels: Optional[int] = None, + num_layers: int = 19, + num_single_layers: int = 38, + attention_head_dim: int = 128, + num_attention_heads: int = 24, + joint_attention_dim: int = 4096, + pooled_projection_dim: int = 768, + guidance_embeds: bool = False, + axes_dims_rope: Tuple[int, ...] 
= (16, 56, 56), + variant: str = "flux", + approximator_in_factor: int = 16, + approximator_hidden_dim: int = 5120, + approximator_layers: int = 5, + ): + super().__init__() + self.out_channels = out_channels or in_channels + self.inner_dim = num_attention_heads * attention_head_dim + + self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) + + if variant == "flux": + text_time_guidance_cls = ( + CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings + ) + self.time_text_embed = text_time_guidance_cls( + embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim + ) + elif variant == "chroma": + self.time_text_embed = CombinedTimestepTextProjChromaEmbeddings( + factor=approximator_in_factor, + hidden_dim=approximator_hidden_dim, + out_dim=3 * num_single_layers + 2 * 6 * num_layers + 2, + embedding_dim=self.inner_dim, + n_layers=approximator_layers, + ) + self.distilled_guidance_layer = ChromaApproximator(in_dim=64, out_dim=3072, hidden_dim=5120, n_layers=5) + else: + raise ValueError(INVALID_VARIANT_ERRMSG) + + self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) + self.x_embedder = nn.Linear(in_channels, self.inner_dim) + + self.transformer_blocks = nn.ModuleList( + [ + FluxTransformerBlock( + dim=self.inner_dim, + num_attention_heads=num_attention_heads, + attention_head_dim=attention_head_dim, + variant=variant, + ) + for _ in range(num_layers) + ] + ) + + self.single_transformer_blocks = nn.ModuleList( + [ + FluxSingleTransformerBlock( + dim=self.inner_dim, + num_attention_heads=num_attention_heads, + attention_head_dim=attention_head_dim, + variant=variant, + ) + for _ in range(num_single_layers) + ] + ) + + norm_out_cls = AdaLayerNormContinuous if variant != "chroma" else AdaLayerNormContinuousPruned + self.norm_out = norm_out_cls(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) + self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) + + self.gradient_checkpointing = False + + @property + def is_chroma(self) -> bool: + return isinstance(self.time_text_embed, CombinedTimestepTextProjChromaEmbeddings) + + @property + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor() + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): + r""" + Sets the attention processor to use to compute attention. + + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. 
+ + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor) + else: + module.set_processor(processor.pop(f"{name}.processor")) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedFluxAttnProcessor2_0 + def fuse_qkv_projections(self): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) + are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is 🧪 experimental. + + + """ + self.original_attn_processors = None + + for _, attn_processor in self.attn_processors.items(): + if "Added" in str(attn_processor.__class__.__name__): + raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + + self.original_attn_processors = self.attn_processors + + for module in self.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + + self.set_attn_processor(FusedFluxAttnProcessor2_0()) + + # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections + def unfuse_qkv_projections(self): + """Disables the fused QKV projection if enabled. + + + + This API is 🧪 experimental. + + + + """ + if self.original_attn_processors is not None: + self.set_attn_processor(self.original_attn_processors) + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor = None, + pooled_projections: torch.Tensor = None, + timestep: torch.LongTensor = None, + img_ids: torch.Tensor = None, + txt_ids: torch.Tensor = None, + guidance: torch.Tensor = None, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_block_samples=None, + controlnet_single_block_samples=None, + return_dict: bool = True, + controlnet_blocks_repeat: bool = False, + ) -> Union[torch.Tensor, Transformer2DModelOutput]: + """ + The [`FluxTransformer2DModel`] forward method. + + Args: + hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`): + Input `hidden_states`. + encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`): + Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. + pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected + from the embeddings of input conditions. + timestep ( `torch.LongTensor`): + Used to indicate denoising step. 
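+            img_ids (`torch.Tensor` of shape `(image_sequence_length, 3)`):
+                Rotary position IDs for the image tokens; concatenated with `txt_ids` to compute the
+                positional embedding.
+            txt_ids (`torch.Tensor` of shape `(text_sequence_length, 3)`):
+                Rotary position IDs for the text tokens.
+            guidance (`torch.Tensor`, *optional*):
+                Guidance-scale conditioning, used by guidance-distilled checkpoints and as part of the
+                Chroma approximator input.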
+ block_controlnet_hidden_states: (`list` of `torch.Tensor`): + A list of tensors that if specified are added to the residuals of transformer blocks. + joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain + tuple. + + Returns: + If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a + `tuple` where the first element is the sample tensor. + """ + if joint_attention_kwargs is not None: + joint_attention_kwargs = joint_attention_kwargs.copy() + lora_scale = joint_attention_kwargs.pop("scale", 1.0) + else: + lora_scale = 1.0 + + if USE_PEFT_BACKEND: + # weight the lora layers by setting `lora_scale` for each PEFT layer + scale_lora_layers(self, lora_scale) + else: + if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None: + logger.warning( + "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." + ) + + is_chroma = self.is_chroma + hidden_states = self.x_embedder(hidden_states) + + timestep = timestep.to(hidden_states.dtype) * 1000 + if guidance is not None: + guidance = guidance.to(hidden_states.dtype) * 1000 + + if not is_chroma: + temb = ( + self.time_text_embed(timestep, pooled_projections) + if guidance is None + else self.time_text_embed(timestep, guidance, pooled_projections) + ) + else: + input_vec = self.time_text_embed(timestep, guidance, pooled_projections) + pooled_temb = self.distilled_guidance_layer(input_vec) + + encoder_hidden_states = self.context_embedder(encoder_hidden_states) + + if txt_ids.ndim == 3: + logger.warning( + "Passing `txt_ids` 3d torch.Tensor is deprecated." + "Please remove the batch dimension and pass it as a 2d torch Tensor" + ) + txt_ids = txt_ids[0] + if img_ids.ndim == 3: + logger.warning( + "Passing `img_ids` 3d torch.Tensor is deprecated." 
+ "Please remove the batch dimension and pass it as a 2d torch Tensor" + ) + img_ids = img_ids[0] + + ids = torch.cat((txt_ids, img_ids), dim=0) + image_rotary_emb = self.pos_embed(ids) + + if joint_attention_kwargs is not None and "ip_adapter_image_embeds" in joint_attention_kwargs: + ip_adapter_image_embeds = joint_attention_kwargs.pop("ip_adapter_image_embeds") + ip_hidden_states = self.encoder_hid_proj(ip_adapter_image_embeds) + joint_attention_kwargs.update({"ip_hidden_states": ip_hidden_states}) + + for index_block, block in enumerate(self.transformer_blocks): + if is_chroma: + img_offset = 3 * len(self.single_transformer_blocks) + txt_offset = img_offset + 6 * len(self.transformer_blocks) + img_modulation = img_offset + 6 * index_block + text_modulation = txt_offset + 6 * index_block + temb = torch.cat( + ( + pooled_temb[:, img_modulation : img_modulation + 6], + pooled_temb[:, text_modulation : text_modulation + 6], + ), + dim=1, + ) + if torch.is_grad_enabled() and self.gradient_checkpointing: + encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( + block, + hidden_states, + encoder_hidden_states, + temb, + image_rotary_emb, + ) + + else: + encoder_hidden_states, hidden_states = block( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + temb=temb, + image_rotary_emb=image_rotary_emb, + joint_attention_kwargs=joint_attention_kwargs, + ) + + # controlnet residual + if controlnet_block_samples is not None: + interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) + interval_control = int(np.ceil(interval_control)) + # For Xlabs ControlNet. + if controlnet_blocks_repeat: + hidden_states = ( + hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)] + ) + else: + hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] + hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) + + for index_block, block in enumerate(self.single_transformer_blocks): + if is_chroma: + start_idx = 3 * index_block + temb = pooled_temb[:, start_idx : start_idx + 3] + if torch.is_grad_enabled() and self.gradient_checkpointing: + hidden_states = self._gradient_checkpointing_func( + block, + hidden_states, + temb, + image_rotary_emb, + ) + + else: + hidden_states = block( + hidden_states=hidden_states, + temb=temb, + image_rotary_emb=image_rotary_emb, + joint_attention_kwargs=joint_attention_kwargs, + ) + + # controlnet residual + if controlnet_single_block_samples is not None: + interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples) + interval_control = int(np.ceil(interval_control)) + hidden_states[:, encoder_hidden_states.shape[1] :, ...] = ( + hidden_states[:, encoder_hidden_states.shape[1] :, ...] + + controlnet_single_block_samples[index_block // interval_control] + ) + + hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...] 
+ + if is_chroma: + temb = pooled_temb[:, -2:] + hidden_states = self.norm_out(hidden_states, temb) + output = self.proj_out(hidden_states) + + if USE_PEFT_BACKEND: + # remove `lora_scale` from each PEFT layer + unscale_lora_layers(self, lora_scale) + + if not return_dict: + return (output,) + + return Transformer2DModelOutput(sample=output) From e271af9495435016e2af1230e66ea242e624c720 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Mon, 9 Jun 2025 21:03:10 -0600 Subject: [PATCH 03/89] working state (normalization) --- src/diffusers/models/normalization.py | 119 +++++++++++++++++++++++++- 1 file changed, 116 insertions(+), 3 deletions(-) diff --git a/src/diffusers/models/normalization.py b/src/diffusers/models/normalization.py index 4a512c5cb166..f2b71bb6888e 100644 --- a/src/diffusers/models/normalization.py +++ b/src/diffusers/models/normalization.py @@ -171,6 +171,46 @@ def forward( return x, gate_msa, shift_mlp, scale_mlp, gate_mlp +class AdaLayerNormZeroPruned(nn.Module): + r""" + Norm layer adaptive layer norm zero (adaLN-Zero). + + Parameters: + embedding_dim (`int`): The size of each embedding vector. + num_embeddings (`int`): The size of the embeddings dictionary. + """ + + def __init__(self, embedding_dim: int, num_embeddings: Optional[int] = None, norm_type="layer_norm", bias=True): + super().__init__() + if num_embeddings is not None: + self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim) + else: + self.emb = None + + if norm_type == "layer_norm": + self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6) + elif norm_type == "fp32_layer_norm": + self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=False, bias=False) + else: + raise ValueError( + f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'." + ) + + def forward( + self, + x: torch.Tensor, + timestep: Optional[torch.Tensor] = None, + class_labels: Optional[torch.LongTensor] = None, + hidden_dtype: Optional[torch.dtype] = None, + emb: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + if self.emb is not None: + emb = self.emb(timestep, class_labels, hidden_dtype=hidden_dtype) + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.squeeze(0).chunk(6, dim=0) + x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] + return x, gate_msa, shift_mlp, scale_mlp, gate_mlp + + class AdaLayerNormZeroSingle(nn.Module): r""" Norm layer adaptive layer norm zero (adaLN-Zero). @@ -203,6 +243,35 @@ def forward( return x, gate_msa +class AdaLayerNormZeroSinglePruned(nn.Module): + r""" + Norm layer adaptive layer norm zero (adaLN-Zero). + + Parameters: + embedding_dim (`int`): The size of each embedding vector. + num_embeddings (`int`): The size of the embeddings dictionary. + """ + + def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True): + super().__init__() + + if norm_type == "layer_norm": + self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6) + else: + raise ValueError( + f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'." 
+ ) + + def forward( + self, + x: torch.Tensor, + emb: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + shift_msa, scale_msa, gate_msa = emb.squeeze(0).chunk(3, dim=0) + x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] + return x, gate_msa + + class LuminaRMSNormZero(nn.Module): """ Norm layer adaptive RMS normalization zero. @@ -237,7 +306,7 @@ class AdaLayerNormSingle(nn.Module): r""" Norm layer adaptive layer norm single (adaLN-single). - As proposed in PixArt-Alpha (see: https://huggingface.co/papers/2310.00426; Section 2.3). + As proposed in PixArt-Alpha (see: https://arxiv.org/abs/2310.00426; Section 2.3). Parameters: embedding_dim (`int`): The size of each embedding vector. @@ -305,6 +374,50 @@ def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: return x +class AdaLayerNormContinuousPruned(nn.Module): + r""" + Adaptive normalization layer with a norm layer (layer_norm or rms_norm). + + Args: + embedding_dim (`int`): Embedding dimension to use during projection. + conditioning_embedding_dim (`int`): Dimension of the input condition. + elementwise_affine (`bool`, defaults to `True`): + Boolean flag to denote if affine transformation should be applied. + eps (`float`, defaults to 1e-5): Epsilon factor. + bias (`bias`, defaults to `True`): Boolean flag to denote if bias should be use. + norm_type (`str`, defaults to `"layer_norm"`): + Normalization layer to use. Values supported: "layer_norm", "rms_norm". + """ + + def __init__( + self, + embedding_dim: int, + conditioning_embedding_dim: int, + # NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters + # because the output is immediately scaled and shifted by the projected conditioning embeddings. + # Note that AdaLayerNorm does not let the norm layer have scale and shift parameters. + # However, this is how it was implemented in the original code, and it's rather likely you should + # set `elementwise_affine` to False. + elementwise_affine=True, + eps=1e-5, + bias=True, + norm_type="layer_norm", + ): + super().__init__() + if norm_type == "layer_norm": + self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias) + elif norm_type == "rms_norm": + self.norm = RMSNorm(embedding_dim, eps, elementwise_affine) + else: + raise ValueError(f"unknown norm_type {norm_type}") + + def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: + # convert back to the original dtype in case `conditioning_embedding`` is upcasted to float32 (needed for hunyuanDiT) + shift, scale = torch.chunk(emb.squeeze(0).to(x.dtype), 2, dim=0) + x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] + return x + + class AdaLayerNormContinuous(nn.Module): r""" Adaptive normalization layer with a norm layer (layer_norm or rms_norm). @@ -510,7 +623,7 @@ def forward(self, input): class RMSNorm(nn.Module): r""" - RMS Norm as introduced in https://huggingface.co/papers/1910.07467 by Zhang et al. + RMS Norm as introduced in https://arxiv.org/abs/1910.07467 by Zhang et al. Args: dim (`int`): Number of dimensions to use for `weights`. Only effective when `elementwise_affine` is True. @@ -600,7 +713,7 @@ def forward(self, hidden_states): class GlobalResponseNorm(nn.Module): r""" - Global response normalization as introduced in ConvNeXt-v2 (https://huggingface.co/papers/2301.00808). + Global response normalization as introduced in ConvNeXt-v2 (https://arxiv.org/abs/2301.00808). 
Args: dim (`int`): Number of dimensions to use for the `gamma` and `beta`. From 15f2bd5c3971f94475eacc01c3ac5ac802e32461 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Mon, 9 Jun 2025 21:05:59 -0600 Subject: [PATCH 04/89] working state (embeddings) --- src/diffusers/models/embeddings.py | 54 ++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index c25e9997e3fb..8aa2ea5841e9 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -31,7 +31,7 @@ def get_timestep_embedding( downscale_freq_shift: float = 1, scale: float = 1, max_period: int = 10000, -): +) -> torch.Tensor: """ This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. @@ -1327,7 +1327,7 @@ def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shif self.downscale_freq_shift = downscale_freq_shift self.scale = scale - def forward(self, timesteps): + def forward(self, timesteps: torch.Tensor) -> torch.Tensor: t_emb = get_timestep_embedding( timesteps, self.num_channels, @@ -1401,7 +1401,7 @@ class ImagePositionalEmbeddings(nn.Module): Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for the height and width of the latent space. - For more details, see figure 10 of the dall-e paper: https://huggingface.co/papers/2102.12092 + For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092 For VQ-diffusion: @@ -1637,6 +1637,35 @@ def forward(self, timestep, guidance, pooled_projection): return conditioning +class CombinedTimestepTextProjChromaEmbeddings(nn.Module): + def __init__(self, factor: int, hidden_dim: int, out_dim: int, n_layers: int, embedding_dim: int): + super().__init__() + + self.time_proj = Timesteps(num_channels=factor, flip_sin_to_cos=True, downscale_freq_shift=0) + self.guidance_proj = Timesteps(num_channels=factor, flip_sin_to_cos=True, downscale_freq_shift=0) + + self.register_buffer( + "mod_proj", + get_timestep_embedding(torch.arange(out_dim)*1000, 2 * factor, flip_sin_to_cos=True, downscale_freq_shift=0, ), + persistent=False, + ) + + def forward( + self, timestep: torch.Tensor, guidance: Optional[torch.Tensor], pooled_projections: torch.Tensor + ) -> torch.Tensor: + mod_index_length = self.mod_proj.shape[0] + timesteps_proj = self.time_proj(timestep).to(dtype=timestep.dtype) + guidance_proj = self.guidance_proj(torch.tensor([0])).to(dtype=timestep.dtype, device=timestep.device) + + mod_proj = self.mod_proj.to(dtype=timesteps_proj.dtype, device=timesteps_proj.device) + timestep_guidance = ( + torch.cat([timesteps_proj, guidance_proj], dim=1).unsqueeze(1).repeat(1, mod_index_length, 1) + ) + input_vec = torch.cat([timestep_guidance, mod_proj.unsqueeze(0)], dim=-1) + + return input_vec + + class CogView3CombinedTimestepSizeEmbeddings(nn.Module): def __init__(self, embedding_dim: int, condition_dim: int, pooled_projection_dim: int, timesteps_dim: int = 256): super().__init__() @@ -2230,6 +2259,25 @@ def forward(self, caption): return hidden_states +class ChromaApproximator(nn.Module): + def __init__(self, in_dim: int, out_dim: int, hidden_dim: int, n_layers: int = 5): + super().__init__() + self.in_proj = nn.Linear(in_dim, hidden_dim, bias=True) + self.layers = nn.ModuleList( + [PixArtAlphaTextProjection(hidden_dim, hidden_dim, act_fn="silu") for _ in 
range(n_layers)] + ) + self.norms = nn.ModuleList([nn.RMSNorm(hidden_dim) for _ in range(n_layers)]) + self.out_proj = nn.Linear(hidden_dim, out_dim) + + def forward(self, x): + x = self.in_proj(x) + + for layer, norms in zip(self.layers, self.norms): + x = x + layer(norms(x)) + + return self.out_proj(x) + + class IPAdapterPlusImageProjectionBlock(nn.Module): def __init__( self, From 32e6a006cfe486ba774acf2920ffcf5382ed2449 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Mon, 9 Jun 2025 21:13:32 -0600 Subject: [PATCH 05/89] add chroma loader --- src/diffusers/loaders/single_file_utils.py | 166 +++++++++++++++++++++ 1 file changed, 166 insertions(+) diff --git a/src/diffusers/loaders/single_file_utils.py b/src/diffusers/loaders/single_file_utils.py index 0f762b949d47..aace8fc7bffb 100644 --- a/src/diffusers/loaders/single_file_utils.py +++ b/src/diffusers/loaders/single_file_utils.py @@ -3310,3 +3310,169 @@ def convert_hidream_transformer_to_diffusers(checkpoint, **kwargs): checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) return checkpoint + +def convert_chroma_transformer_checkpoint_to_diffusers(checkpoint, **kwargs): + converted_state_dict = {} + keys = list(checkpoint.keys()) + + for k in keys: + if "model.diffusion_model." in k: + checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k) + + num_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "double_blocks." in k))[-1] + 1 # noqa: C401 + num_single_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "single_blocks." in k))[-1] + 1 # noqa: C401 + num_guidance_layers = list(set(int(k.split(".", 3)[2]) for k in checkpoint if "distilled_guidance_layer.layers." in k))[-1] + 1 # noqa: C401 + mlp_ratio = 4.0 + inner_dim = 3072 + + # in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale; + # while in diffusers it split into scale, shift. Here we swap the linear projection weights in order to be able to use diffusers implementation + def swap_scale_shift(weight): + shift, scale = weight.chunk(2, dim=0) + new_weight = torch.cat([scale, shift], dim=0) + return new_weight + + # guidance + converted_state_dict["time_text_embed.embedder.in_proj.bias"] = checkpoint.pop( + "distilled_guidance_layer.in_proj.bias" + ) + converted_state_dict["time_text_embed.embedder.in_proj.weight"] = checkpoint.pop( + "distilled_guidance_layer.in_proj.weight" + ) + converted_state_dict["time_text_embed.embedder.out_proj.bias"] = checkpoint.pop( + "distilled_guidance_layer.out_proj.bias" + ) + converted_state_dict["time_text_embed.embedder.out_proj.weight"] = checkpoint.pop( + "distilled_guidance_layer.out_proj.weight" + ) + for i in range(num_guidance_layers): + block_prefix = f"time_text_embed.embedder.layers.{i}." 
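+        # Each original `distilled_guidance_layer.layers.{i}` is a two-layer MLP; on the
+        # diffusers side it becomes a PixArtAlphaTextProjection, so `in_layer`/`out_layer`
+        # map to `linear_1`/`linear_2`, and the matching RMSNorm `scale` parameter maps to
+        # `norms.{i}.weight`, as the pops below show.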
+ converted_state_dict[f"{block_prefix}linear_1.bias"] = checkpoint.pop( + f"distilled_guidance_layer.layers.{i}.in_layer.bias" + ) + converted_state_dict[f"{block_prefix}linear_1.weight"] = checkpoint.pop( + f"distilled_guidance_layer.layers.{i}.in_layer.weight" + ) + converted_state_dict[f"{block_prefix}linear_2.bias"] = checkpoint.pop( + f"distilled_guidance_layer.layers.{i}.out_layer.bias" + ) + converted_state_dict[f"{block_prefix}linear_2.weight"] = checkpoint.pop( + f"distilled_guidance_layer.layers.{i}.out_layer.weight" + ) + converted_state_dict[f"time_text_embed.embedder.norms.{i}.weight"] = checkpoint.pop( + f"distilled_guidance_layer.norms.{i}.scale" + ) + + # context_embedder + converted_state_dict["context_embedder.weight"] = checkpoint.pop("txt_in.weight") + converted_state_dict["context_embedder.bias"] = checkpoint.pop("txt_in.bias") + + # x_embedder + converted_state_dict["x_embedder.weight"] = checkpoint.pop("img_in.weight") + converted_state_dict["x_embedder.bias"] = checkpoint.pop("img_in.bias") + + # double transformer blocks + for i in range(num_layers): + block_prefix = f"transformer_blocks.{i}." + # Q, K, V + sample_q, sample_k, sample_v = torch.chunk(checkpoint.pop(f"double_blocks.{i}.img_attn.qkv.weight"), 3, dim=0) + context_q, context_k, context_v = torch.chunk( + checkpoint.pop(f"double_blocks.{i}.txt_attn.qkv.weight"), 3, dim=0 + ) + sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( + checkpoint.pop(f"double_blocks.{i}.img_attn.qkv.bias"), 3, dim=0 + ) + context_q_bias, context_k_bias, context_v_bias = torch.chunk( + checkpoint.pop(f"double_blocks.{i}.txt_attn.qkv.bias"), 3, dim=0 + ) + converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([sample_q]) + converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([sample_q_bias]) + converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([sample_k]) + converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([sample_k_bias]) + converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([sample_v]) + converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([sample_v_bias]) + converted_state_dict[f"{block_prefix}attn.add_q_proj.weight"] = torch.cat([context_q]) + converted_state_dict[f"{block_prefix}attn.add_q_proj.bias"] = torch.cat([context_q_bias]) + converted_state_dict[f"{block_prefix}attn.add_k_proj.weight"] = torch.cat([context_k]) + converted_state_dict[f"{block_prefix}attn.add_k_proj.bias"] = torch.cat([context_k_bias]) + converted_state_dict[f"{block_prefix}attn.add_v_proj.weight"] = torch.cat([context_v]) + converted_state_dict[f"{block_prefix}attn.add_v_proj.bias"] = torch.cat([context_v_bias]) + # qk_norm + converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = checkpoint.pop( + f"double_blocks.{i}.img_attn.norm.query_norm.scale" + ) + converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = checkpoint.pop( + f"double_blocks.{i}.img_attn.norm.key_norm.scale" + ) + converted_state_dict[f"{block_prefix}attn.norm_added_q.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_attn.norm.query_norm.scale" + ) + converted_state_dict[f"{block_prefix}attn.norm_added_k.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_attn.norm.key_norm.scale" + ) + # ff img_mlp + converted_state_dict[f"{block_prefix}ff.net.0.proj.weight"] = checkpoint.pop( + f"double_blocks.{i}.img_mlp.0.weight" + ) + converted_state_dict[f"{block_prefix}ff.net.0.proj.bias"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.0.bias") + 
converted_state_dict[f"{block_prefix}ff.net.2.weight"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.2.weight") + converted_state_dict[f"{block_prefix}ff.net.2.bias"] = checkpoint.pop(f"double_blocks.{i}.img_mlp.2.bias") + converted_state_dict[f"{block_prefix}ff_context.net.0.proj.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_mlp.0.weight" + ) + converted_state_dict[f"{block_prefix}ff_context.net.0.proj.bias"] = checkpoint.pop( + f"double_blocks.{i}.txt_mlp.0.bias" + ) + converted_state_dict[f"{block_prefix}ff_context.net.2.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_mlp.2.weight" + ) + converted_state_dict[f"{block_prefix}ff_context.net.2.bias"] = checkpoint.pop( + f"double_blocks.{i}.txt_mlp.2.bias" + ) + # output projections. + converted_state_dict[f"{block_prefix}attn.to_out.0.weight"] = checkpoint.pop( + f"double_blocks.{i}.img_attn.proj.weight" + ) + converted_state_dict[f"{block_prefix}attn.to_out.0.bias"] = checkpoint.pop( + f"double_blocks.{i}.img_attn.proj.bias" + ) + converted_state_dict[f"{block_prefix}attn.to_add_out.weight"] = checkpoint.pop( + f"double_blocks.{i}.txt_attn.proj.weight" + ) + converted_state_dict[f"{block_prefix}attn.to_add_out.bias"] = checkpoint.pop( + f"double_blocks.{i}.txt_attn.proj.bias" + ) + + # single transformer blocks + for i in range(num_single_layers): + block_prefix = f"single_transformer_blocks.{i}." + # Q, K, V, mlp + mlp_hidden_dim = int(inner_dim * mlp_ratio) + split_size = (inner_dim, inner_dim, inner_dim, mlp_hidden_dim) + q, k, v, mlp = torch.split(checkpoint.pop(f"single_blocks.{i}.linear1.weight"), split_size, dim=0) + q_bias, k_bias, v_bias, mlp_bias = torch.split( + checkpoint.pop(f"single_blocks.{i}.linear1.bias"), split_size, dim=0 + ) + converted_state_dict[f"{block_prefix}attn.to_q.weight"] = torch.cat([q]) + converted_state_dict[f"{block_prefix}attn.to_q.bias"] = torch.cat([q_bias]) + converted_state_dict[f"{block_prefix}attn.to_k.weight"] = torch.cat([k]) + converted_state_dict[f"{block_prefix}attn.to_k.bias"] = torch.cat([k_bias]) + converted_state_dict[f"{block_prefix}attn.to_v.weight"] = torch.cat([v]) + converted_state_dict[f"{block_prefix}attn.to_v.bias"] = torch.cat([v_bias]) + converted_state_dict[f"{block_prefix}proj_mlp.weight"] = torch.cat([mlp]) + converted_state_dict[f"{block_prefix}proj_mlp.bias"] = torch.cat([mlp_bias]) + # qk norm + converted_state_dict[f"{block_prefix}attn.norm_q.weight"] = checkpoint.pop( + f"single_blocks.{i}.norm.query_norm.scale" + ) + converted_state_dict[f"{block_prefix}attn.norm_k.weight"] = checkpoint.pop( + f"single_blocks.{i}.norm.key_norm.scale" + ) + # output projections. 
+        converted_state_dict[f"{block_prefix}proj_out.weight"] = checkpoint.pop(f"single_blocks.{i}.linear2.weight")
+        converted_state_dict[f"{block_prefix}proj_out.bias"] = checkpoint.pop(f"single_blocks.{i}.linear2.bias")
+
+    converted_state_dict["proj_out.weight"] = checkpoint.pop("final_layer.linear.weight")
+    converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias")
+
+    return converted_state_dict

From bc36a0d883bc594ec49ed4c01537aa827a8202c1 Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Mon, 9 Jun 2025 21:15:19 -0600
Subject: [PATCH 06/89] add chroma to mappings

---
 src/diffusers/loaders/single_file_model.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/diffusers/loaders/single_file_model.py b/src/diffusers/loaders/single_file_model.py
index 6919c4949d59..82e4db7283cc 100644
--- a/src/diffusers/loaders/single_file_model.py
+++ b/src/diffusers/loaders/single_file_model.py
@@ -30,6 +30,7 @@
     convert_auraflow_transformer_checkpoint_to_diffusers,
     convert_autoencoder_dc_checkpoint_to_diffusers,
     convert_controlnet_checkpoint,
+    convert_chroma_transformer_checkpoint_to_diffusers,
     convert_flux_transformer_checkpoint_to_diffusers,
     convert_hidream_transformer_to_diffusers,
     convert_hunyuan_video_transformer_to_diffusers,
@@ -97,6 +98,10 @@
         "checkpoint_mapping_fn": convert_flux_transformer_checkpoint_to_diffusers,
         "default_subfolder": "transformer",
     },
+    "ChromaTransformer2DModel": {
+        "checkpoint_mapping_fn": convert_chroma_transformer_checkpoint_to_diffusers,
+        "default_subfolder": "transformer",
+    },
     "LTXVideoTransformer3DModel": {
         "checkpoint_mapping_fn": convert_ltx_transformer_checkpoint_to_diffusers,
         "default_subfolder": "transformer",

From 33ea0b65a42f65965fe74ba1ab778b86d0d05919 Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Mon, 9 Jun 2025 21:25:19 -0600
Subject: [PATCH 07/89] add chroma to transformer init

---
 src/diffusers/models/transformers/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/diffusers/models/transformers/__init__.py b/src/diffusers/models/transformers/__init__.py
index e7b8ba55ca61..cc03a0ccbcdf 100755
--- a/src/diffusers/models/transformers/__init__.py
+++ b/src/diffusers/models/transformers/__init__.py
@@ -17,6 +17,7 @@
     from .t5_film_transformer import T5FilmDecoder
     from .transformer_2d import Transformer2DModel
     from .transformer_allegro import AllegroTransformer3DModel
+    from .transformer_chroma import ChromaTransformer2DModel
     from .transformer_cogview3plus import CogView3PlusTransformer2DModel
     from .transformer_cogview4 import CogView4Transformer2DModel
     from .transformer_cosmos import CosmosTransformer3DModel

From 22ecd19f91039705f90a81c5cc1afa2d8413a26b Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Mon, 9 Jun 2025 21:32:52 -0600
Subject: [PATCH 08/89] take out variant stuff

---
 .../models/transformers/transformer_chroma.py | 119 ++++++------------
 1 file changed, 36 insertions(+), 83 deletions(-)

diff --git a/src/diffusers/models/transformers/transformer_chroma.py b/src/diffusers/models/transformers/transformer_chroma.py
index c542bcaaccf6..1f726f5cb4b0 100644
--- a/src/diffusers/models/transformers/transformer_chroma.py
+++ b/src/diffusers/models/transformers/transformer_chroma.py
@@ -43,40 +43,27 @@
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
 from ..normalization import (
-    AdaLayerNormContinuous,
     AdaLayerNormContinuousPruned,
-
AdaLayerNormZero, AdaLayerNormZeroPruned, - AdaLayerNormZeroSingle, AdaLayerNormZeroSinglePruned, ) logger = logging.get_logger(__name__) # pylint: disable=invalid-name -INVALID_VARIANT_ERRMSG = "`variant` must be `'flux' or `'chroma'`." - @maybe_allow_in_graph -class FluxSingleTransformerBlock(nn.Module): +class ChromaSingleTransformerBlock(nn.Module): def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, mlp_ratio: float = 4.0, - variant: str = "flux", ): super().__init__() self.mlp_hidden_dim = int(dim * mlp_ratio) - - if variant == "flux": - self.norm = AdaLayerNormZeroSingle(dim) - elif variant == "chroma": - self.norm = AdaLayerNormZeroSinglePruned(dim) - else: - raise ValueError(INVALID_VARIANT_ERRMSG) - + self.norm = AdaLayerNormZeroSinglePruned(dim) self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim) self.act_mlp = nn.GELU(approximate="tanh") self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) @@ -132,7 +119,7 @@ def forward( @maybe_allow_in_graph -class FluxTransformerBlock(nn.Module): +class ChromaTransformerBlock(nn.Module): def __init__( self, dim: int, @@ -140,18 +127,10 @@ def __init__( attention_head_dim: int, qk_norm: str = "rms_norm", eps: float = 1e-6, - variant: str = "flux", ): super().__init__() - - if variant == "flux": - self.norm1 = AdaLayerNormZero(dim) - self.norm1_context = AdaLayerNormZero(dim) - elif variant == "chroma": - self.norm1 = AdaLayerNormZeroPruned(dim) - self.norm1_context = AdaLayerNormZeroPruned(dim) - else: - raise ValueError(INVALID_VARIANT_ERRMSG) + self.norm1 = AdaLayerNormZeroPruned(dim) + self.norm1_context = AdaLayerNormZeroPruned(dim) self.attn = Attention( query_dim=dim, @@ -231,13 +210,13 @@ def forward( return encoder_hidden_states, hidden_states -class FluxTransformer2DModel( +class ChromaTransformer2DModel( ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, FluxTransformer2DLoadersMixin, CacheMixin ): """ - The Transformer model introduced in Flux. + The Transformer model introduced in Flux, modified for Chroma. - Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ + Reference: https://huggingface.co/lodestones/Chroma Args: patch_size (`int`, defaults to `1`): @@ -266,7 +245,7 @@ class FluxTransformer2DModel( """ _supports_gradient_checkpointing = True - _no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"] + _no_split_modules = ["ChromaTransformerBlock", "ChromaSingleTransformerBlock"] _skip_layerwise_casting_patterns = ["pos_embed", "norm"] @register_to_config @@ -283,7 +262,6 @@ def __init__( pooled_projection_dim: int = 768, guidance_embeds: bool = False, axes_dims_rope: Tuple[int, ...] 
= (16, 56, 56), - variant: str = "flux", approximator_in_factor: int = 16, approximator_hidden_dim: int = 5120, approximator_layers: int = 5, @@ -294,31 +272,21 @@ def __init__( self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) - if variant == "flux": - text_time_guidance_cls = ( - CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings - ) - self.time_text_embed = text_time_guidance_cls( - embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim - ) - elif variant == "chroma": - self.time_text_embed = CombinedTimestepTextProjChromaEmbeddings( - factor=approximator_in_factor, - hidden_dim=approximator_hidden_dim, - out_dim=3 * num_single_layers + 2 * 6 * num_layers + 2, - embedding_dim=self.inner_dim, - n_layers=approximator_layers, - ) - self.distilled_guidance_layer = ChromaApproximator(in_dim=64, out_dim=3072, hidden_dim=5120, n_layers=5) - else: - raise ValueError(INVALID_VARIANT_ERRMSG) + self.time_text_embed = CombinedTimestepTextProjChromaEmbeddings( + factor=approximator_in_factor, + hidden_dim=approximator_hidden_dim, + out_dim=3 * num_single_layers + 2 * 6 * num_layers + 2, + embedding_dim=self.inner_dim, + n_layers=approximator_layers, + ) + self.distilled_guidance_layer = ChromaApproximator(in_dim=64, out_dim=3072, hidden_dim=5120, n_layers=5) self.context_embedder = nn.Linear(joint_attention_dim, self.inner_dim) self.x_embedder = nn.Linear(in_channels, self.inner_dim) self.transformer_blocks = nn.ModuleList( [ - FluxTransformerBlock( + ChromaTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, @@ -330,7 +298,7 @@ def __init__( self.single_transformer_blocks = nn.ModuleList( [ - FluxSingleTransformerBlock( + ChromaSingleTransformerBlock( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, @@ -340,16 +308,12 @@ def __init__( ] ) - norm_out_cls = AdaLayerNormContinuous if variant != "chroma" else AdaLayerNormContinuousPruned + norm_out_cls = AdaLayerNormContinuousPruned self.norm_out = norm_out_cls(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) self.gradient_checkpointing = False - @property - def is_chroma(self) -> bool: - return isinstance(self.time_text_embed, CombinedTimestepTextProjChromaEmbeddings) - @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: @@ -506,22 +470,14 @@ def forward( "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." 
) - is_chroma = self.is_chroma hidden_states = self.x_embedder(hidden_states) timestep = timestep.to(hidden_states.dtype) * 1000 if guidance is not None: guidance = guidance.to(hidden_states.dtype) * 1000 - if not is_chroma: - temb = ( - self.time_text_embed(timestep, pooled_projections) - if guidance is None - else self.time_text_embed(timestep, guidance, pooled_projections) - ) - else: - input_vec = self.time_text_embed(timestep, guidance, pooled_projections) - pooled_temb = self.distilled_guidance_layer(input_vec) + input_vec = self.time_text_embed(timestep, guidance, pooled_projections) + pooled_temb = self.distilled_guidance_layer(input_vec) encoder_hidden_states = self.context_embedder(encoder_hidden_states) @@ -547,18 +503,17 @@ def forward( joint_attention_kwargs.update({"ip_hidden_states": ip_hidden_states}) for index_block, block in enumerate(self.transformer_blocks): - if is_chroma: - img_offset = 3 * len(self.single_transformer_blocks) - txt_offset = img_offset + 6 * len(self.transformer_blocks) - img_modulation = img_offset + 6 * index_block - text_modulation = txt_offset + 6 * index_block - temb = torch.cat( - ( - pooled_temb[:, img_modulation : img_modulation + 6], - pooled_temb[:, text_modulation : text_modulation + 6], - ), - dim=1, - ) + img_offset = 3 * len(self.single_transformer_blocks) + txt_offset = img_offset + 6 * len(self.transformer_blocks) + img_modulation = img_offset + 6 * index_block + text_modulation = txt_offset + 6 * index_block + temb = torch.cat( + ( + pooled_temb[:, img_modulation : img_modulation + 6], + pooled_temb[:, text_modulation : text_modulation + 6], + ), + dim=1, + ) if torch.is_grad_enabled() and self.gradient_checkpointing: encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( block, @@ -591,9 +546,8 @@ def forward( hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) for index_block, block in enumerate(self.single_transformer_blocks): - if is_chroma: - start_idx = 3 * index_block - temb = pooled_temb[:, start_idx : start_idx + 3] + start_idx = 3 * index_block + temb = pooled_temb[:, start_idx : start_idx + 3] if torch.is_grad_enabled() and self.gradient_checkpointing: hidden_states = self._gradient_checkpointing_func( block, @@ -621,8 +575,7 @@ def forward( hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...] 
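+        # The last two modulation vectors of pooled_temb are reserved for the pruned output
+        # norm below; AdaLayerNormContinuousPruned chunks them directly into (shift, scale)
+        # instead of projecting them from `temb` with a learned linear.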
- if is_chroma: - temb = pooled_temb[:, -2:] + temb = pooled_temb[:, -2:] hidden_states = self.norm_out(hidden_states, temb) output = self.proj_out(hidden_states) From b0df9691d2ec5caa42a9310eef250bef513f15f7 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Tue, 10 Jun 2025 02:09:52 -0600 Subject: [PATCH 09/89] get decently far in changing variant stuff --- .../pipelines/chroma/pipeline_chroma.py | 182 ++---------------- 1 file changed, 21 insertions(+), 161 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 50c0c4cedc57..f6d2e366e48e 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -40,7 +40,7 @@ ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline -from .pipeline_output import FluxPipelineOutput +from .pipeline_output import ChromaPipelineOutput if is_torch_xla_available(): @@ -57,15 +57,13 @@ Examples: ```py >>> import torch - >>> from diffusers import FluxPipeline + >>> from diffusers import ChromaPipeline - >>> pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16) + >>> pipe = ChromaPipeline.from_single_file("chroma-unlocked-v35-detail-calibrated.safetensors", torch_dtype=torch.bfloat16) >>> pipe.to("cuda") >>> prompt = "A cat holding a sign that says hello world" - >>> # Depending on the variant being used, the pipeline call will slightly vary. - >>> # Refer to the pipeline documentation for more details. - >>> image = pipe(prompt, num_inference_steps=4, guidance_scale=0.0).images[0] - >>> image.save("flux.png") + >>> image = pipe(prompt, num_inference_steps=28, guidance_scale=4.0).images[0] + >>> image.save("chroma.png") ``` """ @@ -143,7 +141,7 @@ def retrieve_timesteps( return timesteps, num_inference_steps -class FluxPipeline( +class ChromaPipeline( DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin, @@ -151,27 +149,21 @@ class FluxPipeline( FluxIPAdapterMixin, ): r""" - The Flux pipeline for text-to-image generation. + The Chroma pipeline for text-to-image generation. - Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ + Reference: https://huggingface.co/lodestones/Chroma/ Args: - transformer ([`FluxTransformer2DModel`]): + transformer ([`ChromaTransformer2DModel`]): Conditional Transformer (MMDiT) architecture to denoise the encoded image latents. scheduler ([`FlowMatchEulerDiscreteScheduler`]): A scheduler to be used in combination with `transformer` to denoise the encoded image latents. vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - text_encoder_2 ([`T5EncoderModel`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representation + text_encoder ([`T5EncoderModel`]): [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer). 
- tokenizer_2 (`T5TokenizerFast`): + tokenizer (`T5TokenizerFast`): Second Tokenizer of class [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). """ @@ -184,11 +176,9 @@ def __init__( self, scheduler: FlowMatchEulerDiscreteScheduler, vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - text_encoder_2: T5EncoderModel, - tokenizer_2: T5TokenizerFast, - transformer: FluxTransformer2DModel, + text_encoder: T5EncoderModel, + tokenizer: T5TokenizerFast, + transformer: ChromaTransformer2DModel, image_encoder: CLIPVisionModelWithProjection = None, feature_extractor: CLIPImageProcessor = None, variant: str = "flux", @@ -198,9 +188,7 @@ def __init__( self.register_modules( vae=vae, text_encoder=text_encoder, - text_encoder_2=text_encoder_2, tokenizer=tokenizer, - tokenizer_2=tokenizer_2, transformer=transformer, scheduler=scheduler, image_encoder=image_encoder, @@ -214,10 +202,6 @@ def __init__( self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 ) self.default_sample_size = 128 - if variant not in {"flux", "chroma"}: - raise ValueError("`variant` must be `'flux' or `'chroma'`.") - - self.variant = variant def _get_chroma_attn_mask(self, length: torch.Tensor, max_sequence_length: int) -> torch.Tensor: attention_mask = torch.zeros((length.shape[0], max_sequence_length), dtype=torch.bool, device=length.device) @@ -248,7 +232,7 @@ def _get_t5_prompt_embeds( padding="max_length", max_length=max_sequence_length, truncation=True, - return_length=(self.variant == "chroma"), + return_length=True, return_overflowing_tokens=False, return_tensors="pt", ) @@ -267,8 +251,6 @@ def _get_t5_prompt_embeds( output_hidden_states=False, attention_mask=( self._get_chroma_attn_mask(text_inputs.length, max_sequence_length).to(device) - if self.variant == "chroma" - else None ), )[0] @@ -283,58 +265,12 @@ def _get_t5_prompt_embeds( return prompt_embeds - def _get_clip_prompt_embeds( - self, - prompt: Union[str, List[str]], - num_images_per_prompt: int = 1, - device: Optional[torch.device] = None, - ): - device = device or self._execution_device - - prompt = [prompt] if isinstance(prompt, str) else prompt - batch_size = len(prompt) - - if isinstance(self, TextualInversionLoaderMixin): - prompt = self.maybe_convert_prompt(prompt, self.tokenizer) - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer_max_length, - truncation=True, - return_overflowing_tokens=False, - return_length=False, - return_tensors="pt", - ) - - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer_max_length} tokens: {removed_text}" - ) - prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) - - # Use pooled output of CLIPTextModel - prompt_embeds = prompt_embeds.pooler_output - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) - prompt_embeds = 
prompt_embeds.view(batch_size * num_images_per_prompt, -1) - - return prompt_embeds - def encode_prompt( self, prompt: Union[str, List[str]], - prompt_2: Union[str, List[str]], device: Optional[torch.device] = None, num_images_per_prompt: int = 1, prompt_embeds: Optional[torch.FloatTensor] = None, - pooled_prompt_embeds: Optional[torch.FloatTensor] = None, max_sequence_length: int = 512, lora_scale: Optional[float] = None, ): @@ -343,9 +279,6 @@ def encode_prompt( Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded - prompt_2 (`str` or `List[str]`, *optional*): - The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is - used in all text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): @@ -369,21 +302,11 @@ def encode_prompt( # dynamically adjust the LoRA scale if self.text_encoder is not None and USE_PEFT_BACKEND: scale_lora_layers(self.text_encoder, lora_scale) - if self.text_encoder_2 is not None and USE_PEFT_BACKEND: - scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt_embeds is None: - prompt_2 = prompt_2 or prompt - prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 - # We only use the pooled prompt output from the CLIPTextModel - pooled_prompt_embeds = self._get_clip_prompt_embeds( - prompt=prompt, - device=device, - num_images_per_prompt=num_images_per_prompt, - ) prompt_embeds = self._get_t5_prompt_embeds( prompt=prompt_2, num_images_per_prompt=num_images_per_prompt, @@ -396,15 +319,10 @@ def encode_prompt( # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) - if self.text_encoder_2 is not None: - if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: - # Retrieve the original scale by scaling back the LoRA layers - unscale_lora_layers(self.text_encoder_2, lora_scale) - dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) - return prompt_embeds, pooled_prompt_embeds, text_ids + return prompt_embeds, text_ids def encode_image(self, image, device, num_images_per_prompt): dtype = next(self.image_encoder.parameters()).dtype @@ -456,15 +374,12 @@ def prepare_ip_adapter_image_embeds( def check_inputs( self, prompt, - prompt_2, height, width, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, - pooled_prompt_embeds=None, - negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None, ): @@ -485,39 +400,18 @@ def check_inputs( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) - elif prompt_2 is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): - raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) - elif negative_prompt_2 is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and pooled_prompt_embeds is None: - raise ValueError( - "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." - ) - if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: - raise ValueError( - "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." - ) if max_sequence_length is not None and max_sequence_length > 512: raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") @@ -649,10 +543,7 @@ def interrupt(self): def __call__( self, prompt: Union[str, List[str]] = None, - prompt_2: Optional[Union[str, List[str]]] = None, negative_prompt: Union[str, List[str]] = None, - negative_prompt_2: Optional[Union[str, List[str]]] = None, - true_cfg_scale: float = 1.0, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 28, @@ -662,13 +553,11 @@ def __call__( generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, - pooled_prompt_embeds: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, negative_ip_adapter_image: Optional[PipelineImageInput] = None, negative_ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, - negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, joint_attention_kwargs: Optional[Dict[str, Any]] = None, @@ -683,18 +572,10 @@ def __call__( prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. - prompt_2 (`str` or `List[str]`, *optional*): - The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is - will be used instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is not greater than `1`). 
- negative_prompt_2 (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and - `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders. - true_cfg_scale (`float`, *optional*, defaults to 1.0): - When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): @@ -724,9 +605,6 @@ def __call__( prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. - pooled_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. - If not provided, pooled text embeddings will be generated from `prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of @@ -742,10 +620,6 @@ def __call__( Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. - negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` - input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. @@ -769,7 +643,7 @@ def __call__( Examples: Returns: - [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict` + [`~pipelines.chroma.ChromaPipelineOutput`] or `tuple`: [`~pipelines.chroma.ChromaPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ @@ -780,15 +654,11 @@ def __call__( # 1. Check inputs. 
Raise error if not correct self.check_inputs( prompt, - prompt_2, height, width, negative_prompt=negative_prompt, - negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, - pooled_prompt_embeds=pooled_prompt_embeds, - negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, max_sequence_length=max_sequence_length, ) @@ -811,34 +681,25 @@ def __call__( lora_scale = ( self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None ) - has_neg_prompt = negative_prompt is not None or ( - negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None - ) - do_true_cfg = true_cfg_scale > 1 and has_neg_prompt + do_cfg = guidance_scale > 1 ( prompt_embeds, - pooled_prompt_embeds, text_ids, ) = self.encode_prompt( prompt=prompt, - prompt_2=prompt_2, prompt_embeds=prompt_embeds, - pooled_prompt_embeds=pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, lora_scale=lora_scale, ) - if do_true_cfg: + if do_cfg: ( negative_prompt_embeds, - negative_pooled_prompt_embeds, negative_text_ids, ) = self.encode_prompt( prompt=negative_prompt, - prompt_2=negative_prompt_2, prompt_embeds=negative_prompt_embeds, - pooled_prompt_embeds=negative_pooled_prompt_embeds, device=device, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, @@ -933,7 +794,6 @@ def __call__( hidden_states=latents, timestep=timestep / 1000, guidance=guidance, - pooled_projections=pooled_prompt_embeds, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, @@ -941,7 +801,7 @@ def __call__( return_dict=False, )[0] - if do_true_cfg: + if do_cfg: if negative_image_embeds is not None: self._joint_attention_kwargs["ip_adapter_image_embeds"] = negative_image_embeds neg_noise_pred = self.transformer( From c8cbb31614aa69321ee99f6fe4eadecd0e865d7c Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Tue, 10 Jun 2025 02:22:52 -0600 Subject: [PATCH 10/89] add chroma init --- src/diffusers/pipelines/chroma/__init__.py | 47 ++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 src/diffusers/pipelines/chroma/__init__.py diff --git a/src/diffusers/pipelines/chroma/__init__.py b/src/diffusers/pipelines/chroma/__init__.py new file mode 100644 index 000000000000..9faa7902a15c --- /dev/null +++ b/src/diffusers/pipelines/chroma/__init__.py @@ -0,0 +1,47 @@ +from typing import TYPE_CHECKING + +from ...utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_additional_imports = {} +_import_structure = {"pipeline_output": ["ChromaPipelineOutput"]} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ...utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_chroma"] = ["ChromaPipeline"] +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from 
...utils.dummy_torch_and_transformers_objects import *  # noqa F403
+    else:
+        from .pipeline_chroma import ChromaPipeline
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
+    for name, value in _additional_imports.items():
+        setattr(sys.modules[__name__], name, value)

From 32659236b22e7e13830726f3a4956bebf306d7db Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Tue, 10 Jun 2025 02:24:23 -0600
Subject: [PATCH 11/89] make chroma output class

---
 .../pipelines/chroma/pipeline_output.py       | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 src/diffusers/pipelines/chroma/pipeline_output.py

diff --git a/src/diffusers/pipelines/chroma/pipeline_output.py b/src/diffusers/pipelines/chroma/pipeline_output.py
new file mode 100644
index 000000000000..bb0a52ceb53c
--- /dev/null
+++ b/src/diffusers/pipelines/chroma/pipeline_output.py
@@ -0,0 +1,22 @@
+from dataclasses import dataclass
+from typing import List, Union
+
+import numpy as np
+import PIL.Image
+import torch
+
+from ...utils import BaseOutput
+
+
+@dataclass
+class ChromaPipelineOutput(BaseOutput):
+    """
+    Output class for Chroma pipelines.
+
+    Args:
+        images (`List[PIL.Image.Image]` or `np.ndarray`)
+            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
+            num_channels)`. PIL images or numpy array represent the denoised images of the diffusion pipeline.
+    """
+
+    images: Union[List[PIL.Image.Image], np.ndarray]

From b0f7036d9af75c5df0f39d2d6353964e4c520534 Mon Sep 17 00:00:00 2001
From: Meatfucker <74834323+Meatfucker@users.noreply.github.com>
Date: Tue, 10 Jun 2025 13:07:22 -0400
Subject: [PATCH 12/89] Update pipeline_flux_inpaint.py to fix padding_mask_crop returning only the inpainted area (#11658)

* Update pipeline_flux_inpaint.py to fix padding_mask_crop returning only the inpainted area and not the entire image.

* Apply style fixes

* Update src/diffusers/pipelines/flux/pipeline_flux_inpaint.py

---
 src/diffusers/pipelines/flux/pipeline_flux_inpaint.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py b/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py
index a67eb5d0d646..29c763a6b17b 100644
--- a/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py
+++ b/src/diffusers/pipelines/flux/pipeline_flux_inpaint.py
@@ -1193,6 +1193,11 @@ def __call__(
             image = self.vae.decode(latents, return_dict=False)[0]
             image = self.image_processor.postprocess(image, output_type=output_type)
 
+            if padding_mask_crop is not None:
+                image = [
+                    self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image
+                ]
+
         # Offload all models
         self.maybe_free_model_hooks()

From b79803fe089aa5a6ab5baef0545f380ff4ff059b Mon Sep 17 00:00:00 2001
From: Akash Haridas <58511267+akasharidas@users.noreply.github.com>
Date: Tue, 10 Jun 2025 19:38:54 -0400
Subject: [PATCH 13/89] Allow remote code repo names to contain "." 
(#11652) * allow loading from repo with dot in name * put new arg at the end to avoid breaking compatibility * add test for loading repo with dot in name --------- Co-authored-by: Sayak Paul --- src/diffusers/utils/dynamic_modules_utils.py | 24 +++++++++++++++++--- tests/pipelines/test_pipelines.py | 15 ++++++++++++ 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/src/diffusers/utils/dynamic_modules_utils.py b/src/diffusers/utils/dynamic_modules_utils.py index 5d0752af8983..4878937ab202 100644 --- a/src/diffusers/utils/dynamic_modules_utils.py +++ b/src/diffusers/utils/dynamic_modules_utils.py @@ -154,12 +154,30 @@ def check_imports(filename): return get_relative_imports(filename) -def get_class_in_module(class_name, module_path): +def get_class_in_module(class_name, module_path, pretrained_model_name_or_path=None): """ Import a module on the cache directory for modules and extract a class from it. """ module_path = module_path.replace(os.path.sep, ".") - module = importlib.import_module(module_path) + try: + module = importlib.import_module(module_path) + except ModuleNotFoundError as e: + # This can happen when the repo id contains ".", which Python's import machinery interprets as a directory + # separator. We do a bit of monkey patching to detect and fix this case. + if not ( + pretrained_model_name_or_path is not None + and "." in pretrained_model_name_or_path + and module_path.startswith("diffusers_modules") + and pretrained_model_name_or_path.replace("/", "--") in module_path + ): + raise e # We can't figure this one out, just reraise the original error + + corrected_path = os.path.join(HF_MODULES_CACHE, module_path.replace(".", "/")) + ".py" + corrected_path = corrected_path.replace( + pretrained_model_name_or_path.replace("/", "--").replace(".", "/"), + pretrained_model_name_or_path.replace("/", "--"), + ) + module = importlib.machinery.SourceFileLoader(module_path, corrected_path).load_module() if class_name is None: return find_pipeline_class(module) @@ -454,4 +472,4 @@ def get_class_from_dynamic_module( revision=revision, local_files_only=local_files_only, ) - return get_class_in_module(class_name, final_module.replace(".py", "")) + return get_class_in_module(class_name, final_module.replace(".py", ""), pretrained_model_name_or_path) diff --git a/tests/pipelines/test_pipelines.py b/tests/pipelines/test_pipelines.py index f1d9d244e546..65718a254595 100644 --- a/tests/pipelines/test_pipelines.py +++ b/tests/pipelines/test_pipelines.py @@ -1105,6 +1105,21 @@ def test_remote_auto_custom_pipe(self): assert images.shape == (1, 64, 64, 3) + def test_remote_custom_pipe_with_dot_in_name(self): + # make sure that trust remote code has to be passed + with self.assertRaises(ValueError): + pipeline = DiffusionPipeline.from_pretrained("akasharidas/ddpm-cifar10-32-dot.in.name") + + pipeline = DiffusionPipeline.from_pretrained("akasharidas/ddpm-cifar10-32-dot.in.name", trust_remote_code=True) + + assert pipeline.__class__.__name__ == "CustomPipeline" + + pipeline = pipeline.to(torch_device) + images, output_str = pipeline(num_inference_steps=2, output_type="np") + + assert images[0].shape == (1, 32, 32, 3) + assert output_str == "This is a test" + def test_local_custom_pipeline_repo(self): local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline") pipeline = DiffusionPipeline.from_pretrained( From 8e88495da2e5c720ac85e2e6df29904bac173d31 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 11 Jun 2025 08:32:47 +0530 Subject: [PATCH 14/89] [LoRA] support Flux Control 
LoRA with bnb 8bit. (#11655)

support Flux Control LoRA with bnb 8bit.

---
 src/diffusers/loaders/lora_pipeline.py    |  9 ++++-
 tests/quantization/bnb/test_mixed_int8.py | 48 +++++++++++++++++++++++
 2 files changed, 55 insertions(+), 2 deletions(-)

diff --git a/src/diffusers/loaders/lora_pipeline.py b/src/diffusers/loaders/lora_pipeline.py
index 6092eeff0a80..189a9ceba541 100644
--- a/src/diffusers/loaders/lora_pipeline.py
+++ b/src/diffusers/loaders/lora_pipeline.py
@@ -81,12 +81,17 @@ def _maybe_dequantize_weight_for_expanded_lora(model, module):
         from ..quantizers.gguf.utils import dequantize_gguf_tensor
 
     is_bnb_4bit_quantized = module.weight.__class__.__name__ == "Params4bit"
+    is_bnb_8bit_quantized = module.weight.__class__.__name__ == "Int8Params"
     is_gguf_quantized = module.weight.__class__.__name__ == "GGUFParameter"
 
     if is_bnb_4bit_quantized and not is_bitsandbytes_available():
         raise ValueError(
             "The checkpoint seems to have been quantized with `bitsandbytes` (4bits). Install `bitsandbytes` to load quantized checkpoints."
         )
+    if is_bnb_8bit_quantized and not is_bitsandbytes_available():
+        raise ValueError(
+            "The checkpoint seems to have been quantized with `bitsandbytes` (8bits). Install `bitsandbytes` to load quantized checkpoints."
+        )
     if is_gguf_quantized and not is_gguf_available():
         raise ValueError(
             "The checkpoint seems to have been quantized with `gguf`. Install `gguf` to load quantized checkpoints."
@@ -97,10 +102,10 @@ def _maybe_dequantize_weight_for_expanded_lora(model, module):
         weight_on_cpu = True
 
     device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
-    if is_bnb_4bit_quantized:
+    if is_bnb_4bit_quantized or is_bnb_8bit_quantized:
         module_weight = dequantize_bnb_weight(
             module.weight.to(device) if weight_on_cpu else module.weight,
-            state=module.weight.quant_state,
+            state=module.weight.quant_state if is_bnb_4bit_quantized else module.state,
             dtype=model.dtype,
         ).data
     elif is_gguf_quantized:
diff --git a/tests/quantization/bnb/test_mixed_int8.py b/tests/quantization/bnb/test_mixed_int8.py
index 98575b86cdcc..bb0702c00bd9 100644
--- a/tests/quantization/bnb/test_mixed_int8.py
+++ b/tests/quantization/bnb/test_mixed_int8.py
@@ -19,15 +19,18 @@
 import numpy as np
 import pytest
 from huggingface_hub import hf_hub_download
+from PIL import Image
 
 from diffusers import (
     BitsAndBytesConfig,
     DiffusionPipeline,
+    FluxControlPipeline,
     FluxTransformer2DModel,
     SanaTransformer2DModel,
     SD3Transformer2DModel,
     logging,
 )
+from diffusers.quantizers import PipelineQuantizationConfig
 from diffusers.utils import is_accelerate_version
 from diffusers.utils.testing_utils import (
     CaptureLogger,
@@ -39,6 +42,7 @@
     numpy_cosine_similarity_distance,
     require_accelerate,
     require_bitsandbytes_version_greater,
+    require_peft_backend,
     require_peft_version_greater,
     require_torch,
     require_torch_accelerator,
@@ -697,6 +701,50 @@ def test_lora_loading(self):
 
         self.assertTrue(max_diff < 1e-3)
 
 
+@require_transformers_version_greater("4.44.0")
+@require_peft_backend
+class SlowBnb8BitFluxControlWithLoraTests(Base8bitTests):
+    def setUp(self) -> None:
+        gc.collect()
+        backend_empty_cache(torch_device)
+
+        self.pipeline_8bit = FluxControlPipeline.from_pretrained(
+            "black-forest-labs/FLUX.1-dev",
+            quantization_config=PipelineQuantizationConfig(
+                quant_backend="bitsandbytes_8bit",
+                quant_kwargs={"load_in_8bit": True},
+                components_to_quantize=["transformer", "text_encoder_2"],
+            ),
+            torch_dtype=torch.float16,
+        )
+        self.pipeline_8bit.enable_model_cpu_offload()
+
+    def
tearDown(self): + del self.pipeline_8bit + + gc.collect() + backend_empty_cache(torch_device) + + def test_lora_loading(self): + self.pipeline_8bit.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora") + + output = self.pipeline_8bit( + prompt=self.prompt, + control_image=Image.new(mode="RGB", size=(256, 256)), + height=256, + width=256, + max_sequence_length=64, + output_type="np", + num_inference_steps=8, + generator=torch.Generator().manual_seed(42), + ).images + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.2029, 0.2136, 0.2268, 0.1921, 0.1997, 0.2185, 0.2021, 0.2183, 0.2292]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-3, msg=f"{out_slice=} != {expected_slice=}") + + @slow class BaseBnb8bitSerializationTests(Base8bitTests): def setUp(self): From e27142ac644b1ed77d9d60c55432fe74659520db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tolga=20Cang=C3=B6z?= <46008593+tolgacangoz@users.noreply.github.com> Date: Wed, 11 Jun 2025 11:49:23 +0300 Subject: [PATCH 15/89] [`Wan`] Fix VAE sampling mode in `WanVideoToVideoPipeline` (#11639) * fix: vae sampling mode * fix a typo --- src/diffusers/pipelines/wan/pipeline_wan_video2video.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/diffusers/pipelines/wan/pipeline_wan_video2video.py b/src/diffusers/pipelines/wan/pipeline_wan_video2video.py index 1844f1b49ba1..a4a10d4655a9 100644 --- a/src/diffusers/pipelines/wan/pipeline_wan_video2video.py +++ b/src/diffusers/pipelines/wan/pipeline_wan_video2video.py @@ -419,12 +419,7 @@ def prepare_latents( ) if latents is None: - if isinstance(generator, list): - init_latents = [ - retrieve_latents(self.vae.encode(video[i].unsqueeze(0)), generator[i]) for i in range(batch_size) - ] - else: - init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), generator) for vid in video] + init_latents = [retrieve_latents(self.vae.encode(vid.unsqueeze(0)), sample_mode="argmax") for vid in video] init_latents = torch.cat(init_latents, dim=0).to(dtype) @@ -441,7 +436,7 @@ def prepare_latents( if hasattr(self.scheduler, "add_noise"): latents = self.scheduler.add_noise(init_latents, noise, timestep) else: - latents = self.scheduelr.scale_noise(init_latents, timestep, noise) + latents = self.scheduler.scale_noise(init_latents, timestep, noise) else: latents = latents.to(device) From 33e636cea557d73d0ff4387c3096375ea00ab0d4 Mon Sep 17 00:00:00 2001 From: Yao Matrix Date: Wed, 11 Jun 2025 17:47:06 +0800 Subject: [PATCH 16/89] enable torchao test cases on XPU and switch to device agnostic APIs for test cases (#11654) * enable torchao cases on XPU Signed-off-by: Matrix YAO * device agnostic APIs Signed-off-by: YAO Matrix * more Signed-off-by: YAO Matrix * fix style Signed-off-by: YAO Matrix * enable test_torch_compile_recompilation_and_graph_break on XPU Signed-off-by: YAO Matrix * resolve comments Signed-off-by: YAO Matrix --------- Signed-off-by: Matrix YAO Signed-off-by: YAO Matrix --- .../quantizers/quantization_config.py | 22 ++++----- src/diffusers/utils/testing_utils.py | 4 +- .../test_models_consistency_decoder_vae.py | 5 ++- tests/models/unets/test_models_unet_2d.py | 3 +- .../unets/test_models_unet_2d_condition.py | 5 +-- tests/pipelines/allegro/test_allegro.py | 5 ++- tests/pipelines/audioldm/test_audioldm.py | 10 ++--- tests/pipelines/audioldm2/test_audioldm2.py | 12 +++-- tests/pipelines/cogvideo/test_cogvideox.py | 5 ++- tests/pipelines/cogview3/test_cogview3plus.py | 5 ++- 
.../controlnet/test_controlnet_img2img.py | 5 ++- .../controlnet/test_controlnet_inpaint.py | 5 ++- .../controlnet_sd3/test_controlnet_sd3.py | 2 +- tests/pipelines/deepfloyd_if/test_if.py | 3 +- .../pipelines/deepfloyd_if/test_if_img2img.py | 3 +- tests/pipelines/flux/test_pipeline_flux.py | 4 +- .../flux/test_pipeline_flux_redux.py | 2 +- .../pipelines/hunyuandit/test_hunyuan_dit.py | 5 ++- ...test_stable_diffusion_attend_and_excite.py | 5 ++- .../test_stable_diffusion_inpaint.py | 3 +- .../test_pipeline_stable_diffusion_3.py | 2 +- ...est_pipeline_stable_diffusion_3_img2img.py | 2 +- .../test_stable_diffusion_xl.py | 5 ++- .../test_stable_diffusion_xl_img2img.py | 5 ++- tests/pipelines/test_pipelines.py | 4 +- tests/pipelines/wan/test_wan.py | 6 ++- tests/quantization/torchao/test_torchao.py | 45 ++++++++++--------- tests/single_file/test_lumina2_transformer.py | 6 +-- ...test_model_flux_transformer_single_file.py | 6 +-- tests/single_file/test_sana_transformer.py | 6 +-- 30 files changed, 109 insertions(+), 91 deletions(-) diff --git a/src/diffusers/quantizers/quantization_config.py b/src/diffusers/quantizers/quantization_config.py index 609c9ad15a31..871faf076e5a 100644 --- a/src/diffusers/quantizers/quantization_config.py +++ b/src/diffusers/quantizers/quantization_config.py @@ -493,7 +493,7 @@ def __init__(self, quant_type: str, modules_to_not_convert: Optional[List[str]] TORCHAO_QUANT_TYPE_METHODS = self._get_torchao_quant_type_to_method() if self.quant_type not in TORCHAO_QUANT_TYPE_METHODS.keys(): is_floating_quant_type = self.quant_type.startswith("float") or self.quant_type.startswith("fp") - if is_floating_quant_type and not self._is_cuda_capability_atleast_8_9(): + if is_floating_quant_type and not self._is_xpu_or_cuda_capability_atleast_8_9(): raise ValueError( f"Requested quantization type: {self.quant_type} is not supported on GPUs with CUDA capability <= 8.9. You " f"can check the CUDA capability of your GPU using `torch.cuda.get_device_capability()`." 
@@ -645,7 +645,7 @@ def generate_fpx_quantization_types(bits: int): QUANTIZATION_TYPES.update(INT8_QUANTIZATION_TYPES) QUANTIZATION_TYPES.update(UINTX_QUANTIZATION_DTYPES) - if cls._is_cuda_capability_atleast_8_9(): + if cls._is_xpu_or_cuda_capability_atleast_8_9(): QUANTIZATION_TYPES.update(FLOATX_QUANTIZATION_TYPES) return QUANTIZATION_TYPES @@ -655,14 +655,16 @@ def generate_fpx_quantization_types(bits: int): ) @staticmethod - def _is_cuda_capability_atleast_8_9() -> bool: - if not torch.cuda.is_available(): - raise RuntimeError("TorchAO requires a CUDA compatible GPU and installation of PyTorch.") - - major, minor = torch.cuda.get_device_capability() - if major == 8: - return minor >= 9 - return major >= 9 + def _is_xpu_or_cuda_capability_atleast_8_9() -> bool: + if torch.cuda.is_available(): + major, minor = torch.cuda.get_device_capability() + if major == 8: + return minor >= 9 + return major >= 9 + elif torch.xpu.is_available(): + return True + else: + raise RuntimeError("TorchAO requires a CUDA compatible GPU or Intel XPU and installation of PyTorch.") def get_apply_tensor_subclass(self): TORCHAO_QUANT_TYPE_METHODS = self._get_torchao_quant_type_to_method() diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py index e19a9f83fdb9..ae5a6e6e91eb 100644 --- a/src/diffusers/utils/testing_utils.py +++ b/src/diffusers/utils/testing_utils.py @@ -300,9 +300,7 @@ def require_torch_gpu(test_case): def require_torch_cuda_compatibility(expected_compute_capability): def decorator(test_case): - if not torch.cuda.is_available(): - return unittest.skip(test_case) - else: + if torch.cuda.is_available(): current_compute_capability = get_torch_cuda_device_capability() return unittest.skipUnless( float(current_compute_capability) == float(expected_compute_capability), diff --git a/tests/models/autoencoders/test_models_consistency_decoder_vae.py b/tests/models/autoencoders/test_models_consistency_decoder_vae.py index 77977a78d83b..db87004fcb30 100644 --- a/tests/models/autoencoders/test_models_consistency_decoder_vae.py +++ b/tests/models/autoencoders/test_models_consistency_decoder_vae.py @@ -21,6 +21,7 @@ from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, load_image, slow, @@ -162,13 +163,13 @@ def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) @torch.no_grad() def test_encode_decode(self): diff --git a/tests/models/unets/test_models_unet_2d.py b/tests/models/unets/test_models_unet_2d.py index 0e5fdc4bba2e..1a7959a877f3 100644 --- a/tests/models/unets/test_models_unet_2d.py +++ b/tests/models/unets/test_models_unet_2d.py @@ -22,6 +22,7 @@ from diffusers import UNet2DModel from diffusers.utils import logging from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, floats_tensor, require_torch_accelerator, @@ -229,7 +230,7 @@ def test_from_pretrained_accelerate_wont_change_results(self): # two models don't need to stay in the device at the same time del model_accelerate - torch.cuda.empty_cache() + backend_empty_cache(torch_device) gc.collect() model_normal_load, _ = UNet2DModel.from_pretrained( diff --git a/tests/models/unets/test_models_unet_2d_condition.py 
b/tests/models/unets/test_models_unet_2d_condition.py index ab0dcbc1de11..c8ed68c65b40 100644 --- a/tests/models/unets/test_models_unet_2d_condition.py +++ b/tests/models/unets/test_models_unet_2d_condition.py @@ -46,7 +46,6 @@ require_peft_backend, require_torch_accelerator, require_torch_accelerator_with_fp16, - require_torch_gpu, skip_mps, slow, torch_all_close, @@ -978,13 +977,13 @@ def test_ip_adapter_plus(self): assert sample2.allclose(sample5, atol=1e-4, rtol=1e-4) assert sample2.allclose(sample6, atol=1e-4, rtol=1e-4) - @require_torch_gpu @parameterized.expand( [ ("hf-internal-testing/unet2d-sharded-dummy", None), ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", "fp16"), ] ) + @require_torch_accelerator def test_load_sharded_checkpoint_from_hub(self, repo_id, variant): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() loaded_model = self.model_class.from_pretrained(repo_id, variant=variant) @@ -994,13 +993,13 @@ def test_load_sharded_checkpoint_from_hub(self, repo_id, variant): assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) - @require_torch_gpu @parameterized.expand( [ ("hf-internal-testing/unet2d-sharded-dummy-subfolder", None), ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "fp16"), ] ) + @require_torch_accelerator def test_load_sharded_checkpoint_from_hub_subfolder(self, repo_id, variant): _, inputs_dict = self.prepare_init_args_and_inputs_for_common() loaded_model = self.model_class.from_pretrained(repo_id, subfolder="unet", variant=variant) diff --git a/tests/pipelines/allegro/test_allegro.py b/tests/pipelines/allegro/test_allegro.py index 30fdd68cfd36..30a14ef7f540 100644 --- a/tests/pipelines/allegro/test_allegro.py +++ b/tests/pipelines/allegro/test_allegro.py @@ -24,6 +24,7 @@ from diffusers import AllegroPipeline, AllegroTransformer3DModel, AutoencoderKLAllegro, DDIMScheduler from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_hf_hub_version_greater, @@ -341,12 +342,12 @@ class AllegroPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_allegro(self): generator = torch.Generator("cpu").manual_seed(0) diff --git a/tests/pipelines/audioldm/test_audioldm.py b/tests/pipelines/audioldm/test_audioldm.py index aaf44985aafd..340bc24adc3e 100644 --- a/tests/pipelines/audioldm/test_audioldm.py +++ b/tests/pipelines/audioldm/test_audioldm.py @@ -37,7 +37,7 @@ UNet2DConditionModel, ) from diffusers.utils import is_xformers_available -from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device +from diffusers.utils.testing_utils import backend_empty_cache, enable_full_determinism, nightly, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin @@ -378,12 +378,12 @@ class AudioLDMPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) @@ -423,12 
+423,12 @@ class AudioLDMPipelineNightlyTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) diff --git a/tests/pipelines/audioldm2/test_audioldm2.py b/tests/pipelines/audioldm2/test_audioldm2.py index a8f60fb6dcee..14b5510fcafd 100644 --- a/tests/pipelines/audioldm2/test_audioldm2.py +++ b/tests/pipelines/audioldm2/test_audioldm2.py @@ -45,7 +45,13 @@ LMSDiscreteScheduler, PNDMScheduler, ) -from diffusers.utils.testing_utils import enable_full_determinism, is_torch_version, nightly, torch_device +from diffusers.utils.testing_utils import ( + backend_empty_cache, + enable_full_determinism, + is_torch_version, + nightly, + torch_device, +) from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin @@ -540,12 +546,12 @@ class AudioLDM2PipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) diff --git a/tests/pipelines/cogvideo/test_cogvideox.py b/tests/pipelines/cogvideo/test_cogvideox.py index a9de0ff05fe8..a6349c99c5e8 100644 --- a/tests/pipelines/cogvideo/test_cogvideox.py +++ b/tests/pipelines/cogvideo/test_cogvideox.py @@ -22,6 +22,7 @@ from diffusers import AutoencoderKLCogVideoX, CogVideoXPipeline, CogVideoXTransformer3DModel, DDIMScheduler from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, @@ -334,12 +335,12 @@ class CogVideoXPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_cogvideox(self): generator = torch.Generator("cpu").manual_seed(0) diff --git a/tests/pipelines/cogview3/test_cogview3plus.py b/tests/pipelines/cogview3/test_cogview3plus.py index 79dffd230a75..4eca68dd7bd2 100644 --- a/tests/pipelines/cogview3/test_cogview3plus.py +++ b/tests/pipelines/cogview3/test_cogview3plus.py @@ -22,6 +22,7 @@ from diffusers import AutoencoderKL, CogVideoXDDIMScheduler, CogView3PlusPipeline, CogView3PlusTransformer2DModel from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, @@ -244,12 +245,12 @@ class CogView3PlusPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_cogview3plus(self): generator = torch.Generator("cpu").manual_seed(0) diff --git a/tests/pipelines/controlnet/test_controlnet_img2img.py b/tests/pipelines/controlnet/test_controlnet_img2img.py index 
100765ee34cb..0147d4a65140 100644 --- a/tests/pipelines/controlnet/test_controlnet_img2img.py +++ b/tests/pipelines/controlnet/test_controlnet_img2img.py @@ -36,6 +36,7 @@ from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, floats_tensor, load_numpy, @@ -412,12 +413,12 @@ class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") diff --git a/tests/pipelines/controlnet/test_controlnet_inpaint.py b/tests/pipelines/controlnet/test_controlnet_inpaint.py index b06590e13cb6..63d5fd466021 100644 --- a/tests/pipelines/controlnet/test_controlnet_inpaint.py +++ b/tests/pipelines/controlnet/test_controlnet_inpaint.py @@ -36,6 +36,7 @@ from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, floats_tensor, load_numpy, @@ -464,12 +465,12 @@ class ControlNetInpaintPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") diff --git a/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py index 1be15645efd7..7880f744b95c 100644 --- a/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py +++ b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py @@ -221,7 +221,7 @@ def test_xformers_attention_forwardGenerator_pass(self): @slow @require_big_accelerator -@pytest.mark.big_gpu_with_torch_cuda +@pytest.mark.big_accelerator class StableDiffusion3ControlNetPipelineSlowTests(unittest.TestCase): pipeline_class = StableDiffusion3ControlNetPipeline diff --git a/tests/pipelines/deepfloyd_if/test_if.py b/tests/pipelines/deepfloyd_if/test_if.py index 295b29f12e8c..8445229079f3 100644 --- a/tests/pipelines/deepfloyd_if/test_if.py +++ b/tests/pipelines/deepfloyd_if/test_if.py @@ -25,6 +25,7 @@ from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, + backend_max_memory_allocated, backend_reset_max_memory_allocated, backend_reset_peak_memory_stats, load_numpy, @@ -135,7 +136,7 @@ def test_if_text_to_image(self): image = output.images[0] - mem_bytes = torch.cuda.max_memory_allocated() + mem_bytes = backend_max_memory_allocated(torch_device) assert mem_bytes < 12 * 10**9 expected_image = load_numpy( diff --git a/tests/pipelines/deepfloyd_if/test_if_img2img.py b/tests/pipelines/deepfloyd_if/test_if_img2img.py index da06dc355896..14271a98621b 100644 --- a/tests/pipelines/deepfloyd_if/test_if_img2img.py +++ b/tests/pipelines/deepfloyd_if/test_if_img2img.py @@ -24,6 +24,7 @@ from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, + backend_max_memory_allocated, backend_reset_max_memory_allocated, backend_reset_peak_memory_stats, 
floats_tensor, @@ -151,7 +152,7 @@ def test_if_img2img(self): ) image = output.images[0] - mem_bytes = torch.cuda.max_memory_allocated() + mem_bytes = backend_max_memory_allocated(torch_device) assert mem_bytes < 12 * 10**9 expected_image = load_numpy( diff --git a/tests/pipelines/flux/test_pipeline_flux.py b/tests/pipelines/flux/test_pipeline_flux.py index 646ad928ec05..cbdf617d71ec 100644 --- a/tests/pipelines/flux/test_pipeline_flux.py +++ b/tests/pipelines/flux/test_pipeline_flux.py @@ -224,7 +224,7 @@ def test_flux_true_cfg(self): @nightly @require_big_accelerator -@pytest.mark.big_gpu_with_torch_cuda +@pytest.mark.big_accelerator class FluxPipelineSlowTests(unittest.TestCase): pipeline_class = FluxPipeline repo_id = "black-forest-labs/FLUX.1-schnell" @@ -312,7 +312,7 @@ def test_flux_inference(self): @slow @require_big_accelerator -@pytest.mark.big_gpu_with_torch_cuda +@pytest.mark.big_accelerator class FluxIPAdapterPipelineSlowTests(unittest.TestCase): pipeline_class = FluxPipeline repo_id = "black-forest-labs/FLUX.1-dev" diff --git a/tests/pipelines/flux/test_pipeline_flux_redux.py b/tests/pipelines/flux/test_pipeline_flux_redux.py index 1f204add1c9a..b8f36dfd3cd3 100644 --- a/tests/pipelines/flux/test_pipeline_flux_redux.py +++ b/tests/pipelines/flux/test_pipeline_flux_redux.py @@ -19,7 +19,7 @@ @slow @require_big_accelerator -@pytest.mark.big_gpu_with_torch_cuda +@pytest.mark.big_accelerator class FluxReduxSlowTests(unittest.TestCase): pipeline_class = FluxPriorReduxPipeline repo_id = "black-forest-labs/FLUX.1-Redux-dev" diff --git a/tests/pipelines/hunyuandit/test_hunyuan_dit.py b/tests/pipelines/hunyuandit/test_hunyuan_dit.py index 66453b73b0b3..05c94262abaa 100644 --- a/tests/pipelines/hunyuandit/test_hunyuan_dit.py +++ b/tests/pipelines/hunyuandit/test_hunyuan_dit.py @@ -23,6 +23,7 @@ from diffusers import AutoencoderKL, DDPMScheduler, HunyuanDiT2DModel, HunyuanDiTPipeline from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, @@ -310,12 +311,12 @@ class HunyuanDiTPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_hunyuan_dit_1024(self): generator = torch.Generator("cpu").manual_seed(0) diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py index c66491b15c66..8399e57bfbf1 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py @@ -27,6 +27,7 @@ UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( + backend_empty_cache, load_numpy, nightly, numpy_cosine_similarity_distance, @@ -231,12 +232,12 @@ def tearDownClass(cls): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_attend_and_excite_fp16(self): generator = torch.manual_seed(51) diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py index 2feeaaf11c12..ff4a33abf87d 100644 --- 
a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py @@ -25,6 +25,7 @@ from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( backend_empty_cache, + backend_max_memory_allocated, backend_reset_max_memory_allocated, backend_reset_peak_memory_stats, enable_full_determinism, @@ -287,6 +288,6 @@ def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): output_type="np", ) - mem_bytes = torch.cuda.max_memory_allocated() + mem_bytes = backend_max_memory_allocated(torch_device) # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9 diff --git a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py index 8e2fa77fc083..577ac4ebdd4b 100644 --- a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py +++ b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py @@ -233,7 +233,7 @@ def test_skip_guidance_layers(self): @slow @require_big_accelerator -@pytest.mark.big_gpu_with_torch_cuda +@pytest.mark.big_accelerator class StableDiffusion3PipelineSlowTests(unittest.TestCase): pipeline_class = StableDiffusion3Pipeline repo_id = "stabilityai/stable-diffusion-3-medium-diffusers" diff --git a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py index 80bb35a08e16..f5b5e63a810a 100644 --- a/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py +++ b/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py @@ -168,7 +168,7 @@ def test_multi_vae(self): @slow @require_big_accelerator -@pytest.mark.big_gpu_with_torch_cuda +@pytest.mark.big_accelerator class StableDiffusion3Img2ImgPipelineSlowTests(unittest.TestCase): pipeline_class = StableDiffusion3Img2ImgPipeline repo_id = "stabilityai/stable-diffusion-3-medium-diffusers" diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py index a41e7dc7f342..11f08c882084 100644 --- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py +++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py @@ -35,6 +35,7 @@ UniPCMultistepScheduler, ) from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, load_image, numpy_cosine_similarity_distance, @@ -940,12 +941,12 @@ class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_stable_diffusion_lcm(self): torch.manual_seed(0) diff --git a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py index 9a141634a364..7d19d745a252 100644 --- a/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py +++ b/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py @@ -39,6 +39,7 @@ UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, floats_tensor, load_image, @@ -670,12 +671,12 @@ class 
StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def test_stable_diffusion_xl_img2img_playground(self): torch.manual_seed(0) diff --git a/tests/pipelines/test_pipelines.py b/tests/pipelines/test_pipelines.py index 65718a254595..c4db662784f3 100644 --- a/tests/pipelines/test_pipelines.py +++ b/tests/pipelines/test_pipelines.py @@ -1218,13 +1218,13 @@ def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def dummy_image(self): batch_size = 1 diff --git a/tests/pipelines/wan/test_wan.py b/tests/pipelines/wan/test_wan.py index a162e6841d2d..e3a153dd19c7 100644 --- a/tests/pipelines/wan/test_wan.py +++ b/tests/pipelines/wan/test_wan.py @@ -21,9 +21,11 @@ from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanTransformer3DModel from diffusers.utils.testing_utils import ( + backend_empty_cache, enable_full_determinism, require_torch_accelerator, slow, + torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS @@ -144,12 +146,12 @@ class WanPipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def tearDown(self): super().tearDown() gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) @unittest.skip("TODO: test needs to be implemented") def test_Wanx(self): diff --git a/tests/quantization/torchao/test_torchao.py b/tests/quantization/torchao/test_torchao.py index 0e671307dd18..743da17356f7 100644 --- a/tests/quantization/torchao/test_torchao.py +++ b/tests/quantization/torchao/test_torchao.py @@ -30,13 +30,15 @@ ) from diffusers.models.attention_processor import Attention from diffusers.utils.testing_utils import ( + backend_empty_cache, + backend_synchronize, enable_full_determinism, is_torch_available, is_torchao_available, nightly, numpy_cosine_similarity_distance, require_torch, - require_torch_gpu, + require_torch_accelerator, require_torchao_version_greater_or_equal, slow, torch_device, @@ -61,7 +63,7 @@ @require_torch -@require_torch_gpu +@require_torch_accelerator @require_torchao_version_greater_or_equal("0.7.0") class TorchAoConfigTest(unittest.TestCase): def test_to_dict(self): @@ -79,7 +81,7 @@ def test_post_init_check(self): Test kwargs validations in TorchAoConfig """ _ = TorchAoConfig("int4_weight_only") - with self.assertRaisesRegex(ValueError, "is not supported yet"): + with self.assertRaisesRegex(ValueError, "is not supported"): _ = TorchAoConfig("uint8") with self.assertRaisesRegex(ValueError, "does not support the following keyword arguments"): @@ -119,12 +121,12 @@ def test_repr(self): # Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners @require_torch -@require_torch_gpu +@require_torch_accelerator @require_torchao_version_greater_or_equal("0.7.0") class TorchAoTest(unittest.TestCase): def tearDown(self): gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def get_dummy_components( self, quantization_config: TorchAoConfig, model_id: 
str = "hf-internal-testing/tiny-flux-pipe" @@ -269,6 +271,7 @@ def test_int4wo_quant_bfloat16_conversion(self): subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, + device_map=f"{torch_device}:0", ) weight = quantized_model.transformer_blocks[0].ff.net[2].weight @@ -338,7 +341,7 @@ def test_device_map(self): output = quantized_model(**inputs)[0] output_slice = output.flatten()[-9:].detach().float().cpu().numpy() - self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 2e-3) with tempfile.TemporaryDirectory() as offload_folder: quantization_config = TorchAoConfig("int4_weight_only", group_size=64) @@ -359,7 +362,7 @@ def test_device_map(self): output = quantized_model(**inputs)[0] output_slice = output.flatten()[-9:].detach().float().cpu().numpy() - self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 2e-3) def test_modules_to_not_convert(self): quantization_config = TorchAoConfig("int8_weight_only", modules_to_not_convert=["transformer_blocks.0"]) @@ -518,14 +521,14 @@ def test_sequential_cpu_offload(self): # Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners @require_torch -@require_torch_gpu +@require_torch_accelerator @require_torchao_version_greater_or_equal("0.7.0") class TorchAoSerializationTest(unittest.TestCase): model_name = "hf-internal-testing/tiny-flux-pipe" def tearDown(self): gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def get_dummy_model(self, quant_method, quant_method_kwargs, device=None): quantization_config = TorchAoConfig(quant_method, **quant_method_kwargs) @@ -593,17 +596,17 @@ def _check_serialization_expected_slice(self, quant_method, quant_method_kwargs, ) self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) - def test_int_a8w8_cuda(self): + def test_int_a8w8_accelerator(self): quant_method, quant_method_kwargs = "int8_dynamic_activation_int8_weight", {} expected_slice = np.array([0.3633, -0.1357, -0.0188, -0.249, -0.4688, 0.5078, -0.1289, -0.6914, 0.4551]) - device = "cuda" + device = torch_device self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) - def test_int_a16w8_cuda(self): + def test_int_a16w8_accelerator(self): quant_method, quant_method_kwargs = "int8_weight_only", {} expected_slice = np.array([0.3613, -0.127, -0.0223, -0.2539, -0.459, 0.4961, -0.1357, -0.6992, 0.4551]) - device = "cuda" + device = torch_device self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) @@ -624,14 +627,14 @@ def test_int_a16w8_cpu(self): # Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners @require_torch -@require_torch_gpu +@require_torch_accelerator @require_torchao_version_greater_or_equal("0.7.0") @slow @nightly class SlowTorchAoTests(unittest.TestCase): def tearDown(self): gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def get_dummy_components(self, quantization_config: TorchAoConfig): # This is just for convenience, so that we can modify it at one place for custom environments and 
locally testing @@ -713,8 +716,8 @@ def test_quantization(self): quantization_config = TorchAoConfig(quant_type=quantization_name, modules_to_not_convert=["x_embedder"]) self._test_quant_type(quantization_config, expected_slice) gc.collect() - torch.cuda.empty_cache() - torch.cuda.synchronize() + backend_empty_cache(torch_device) + backend_synchronize(torch_device) def test_serialization_int8wo(self): quantization_config = TorchAoConfig("int8wo") @@ -733,8 +736,8 @@ def test_serialization_int8wo(self): pipe.remove_all_hooks() del pipe.transformer gc.collect() - torch.cuda.empty_cache() - torch.cuda.synchronize() + backend_empty_cache(torch_device) + backend_synchronize(torch_device) transformer = FluxTransformer2DModel.from_pretrained( tmp_dir, torch_dtype=torch.bfloat16, use_safetensors=False ) @@ -783,14 +786,14 @@ def test_memory_footprint_int8wo(self): @require_torch -@require_torch_gpu +@require_torch_accelerator @require_torchao_version_greater_or_equal("0.7.0") @slow @nightly class SlowTorchAoPreserializedModelTests(unittest.TestCase): def tearDown(self): gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) def get_dummy_inputs(self, device: torch.device, seed: int = 0): if str(device).startswith("mps"): diff --git a/tests/single_file/test_lumina2_transformer.py b/tests/single_file/test_lumina2_transformer.py index d3ffd4fc3a55..2ac681897d4e 100644 --- a/tests/single_file/test_lumina2_transformer.py +++ b/tests/single_file/test_lumina2_transformer.py @@ -16,8 +16,6 @@ import gc import unittest -import torch - from diffusers import ( Lumina2Transformer2DModel, ) @@ -66,9 +64,9 @@ def test_single_file_components(self): def test_checkpoint_loading(self): for ckpt_path in self.alternate_keys_ckpt_paths: - torch.cuda.empty_cache() + backend_empty_cache(torch_device) model = self.model_class.from_single_file(ckpt_path) del model gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) diff --git a/tests/single_file/test_model_flux_transformer_single_file.py b/tests/single_file/test_model_flux_transformer_single_file.py index bf11faaa9c0e..81779cf8faee 100644 --- a/tests/single_file/test_model_flux_transformer_single_file.py +++ b/tests/single_file/test_model_flux_transformer_single_file.py @@ -16,8 +16,6 @@ import gc import unittest -import torch - from diffusers import ( FluxTransformer2DModel, ) @@ -64,9 +62,9 @@ def test_single_file_components(self): def test_checkpoint_loading(self): for ckpt_path in self.alternate_keys_ckpt_paths: - torch.cuda.empty_cache() + backend_empty_cache(torch_device) model = self.model_class.from_single_file(ckpt_path) del model gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) diff --git a/tests/single_file/test_sana_transformer.py b/tests/single_file/test_sana_transformer.py index 802ca37abfc3..e74c5be6ff86 100644 --- a/tests/single_file/test_sana_transformer.py +++ b/tests/single_file/test_sana_transformer.py @@ -1,8 +1,6 @@ import gc import unittest -import torch - from diffusers import ( SanaTransformer2DModel, ) @@ -53,9 +51,9 @@ def test_single_file_components(self): def test_checkpoint_loading(self): for ckpt_path in self.alternate_keys_ckpt_paths: - torch.cuda.empty_cache() + backend_empty_cache(torch_device) model = self.model_class.from_single_file(ckpt_path) del model gc.collect() - torch.cuda.empty_cache() + backend_empty_cache(torch_device) From b6f793304437120239d4b942cac623b0ea86039b Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 11 Jun 2025 21:14:24 +0530 Subject: 
[PATCH 17/89] [tests] tests for compilation + quantization (bnb) (#11672)

* start adding compilation tests for quantization.

* fixes

* make common utility.

* modularize.

* add group offloading+compile

* xfail

* update

* Update tests/quantization/test_torch_compile_utils.py

Co-authored-by: Dhruv Nair

* fixes

---------

Co-authored-by: Dhruv Nair
---
 src/diffusers/utils/testing_utils.py         | 12 +++
 tests/quantization/bnb/test_4bit.py          | 27 ++++++
 tests/quantization/bnb/test_mixed_int8.py    | 27 ++++++
 .../quantization/test_torch_compile_utils.py | 87 +++++++++++++++++++
 4 files changed, 153 insertions(+)
 create mode 100644 tests/quantization/test_torch_compile_utils.py

diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py
index ae5a6e6e91eb..5cbe5ff27780 100644
--- a/src/diffusers/utils/testing_utils.py
+++ b/src/diffusers/utils/testing_utils.py
@@ -291,6 +291,18 @@ def decorator(test_case):
     return decorator
 
 
+def require_torch_version_greater(torch_version):
+    """Decorator marking a test that requires torch with a specific version greater."""
+
+    def decorator(test_case):
+        correct_torch_version = is_torch_available() and is_torch_version(">", torch_version)
+        return unittest.skipUnless(
+            correct_torch_version, f"test requires torch with the version greater than {torch_version}"
+        )(test_case)
+
+    return decorator
+
+
 def require_torch_gpu(test_case):
     """Decorator marking a test that requires CUDA and PyTorch."""
     return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
diff --git a/tests/quantization/bnb/test_4bit.py b/tests/quantization/bnb/test_4bit.py
index ac1b0cf3ce6b..2d8b9f698bfe 100644
--- a/tests/quantization/bnb/test_4bit.py
+++ b/tests/quantization/bnb/test_4bit.py
@@ -30,6 +30,7 @@
     FluxTransformer2DModel,
     SD3Transformer2DModel,
 )
+from diffusers.quantizers import PipelineQuantizationConfig
 from diffusers.utils import is_accelerate_version, logging
 from diffusers.utils.testing_utils import (
     CaptureLogger,
@@ -44,11 +45,14 @@
     require_peft_backend,
     require_torch,
     require_torch_accelerator,
+    require_torch_version_greater,
     require_transformers_version_greater,
     slow,
     torch_device,
 )
 
+from ..test_torch_compile_utils import QuantCompileTests
+
 
 def get_some_linear_layer(model):
     if model.__class__.__name__ in ["SD3Transformer2DModel", "FluxTransformer2DModel"]:
@@ -855,3 +859,26 @@ def test_fp4_double_unsafe(self):
 
     def test_fp4_double_safe(self):
         self.test_serialization(quant_type="fp4", double_quant=True, safe_serialization=True)
+
+
+@require_torch_version_greater("2.7.1")
+class Bnb4BitCompileTests(QuantCompileTests):
+    quantization_config = PipelineQuantizationConfig(
+        quant_backend="bitsandbytes_4bit",
+        quant_kwargs={
+            "load_in_4bit": True,
+            "bnb_4bit_quant_type": "nf4",
+            "bnb_4bit_compute_dtype": torch.bfloat16,
+        },
+        components_to_quantize=["transformer", "text_encoder_2"],
+    )
+
+    def test_torch_compile(self):
+        torch._dynamo.config.capture_dynamic_output_shape_ops = True
+        super()._test_torch_compile(quantization_config=self.quantization_config)
+
+    def test_torch_compile_with_cpu_offload(self):
+        super()._test_torch_compile_with_cpu_offload(quantization_config=self.quantization_config)
+
+    def test_torch_compile_with_group_offload(self):
+        super()._test_torch_compile_with_group_offload(quantization_config=self.quantization_config)
diff --git a/tests/quantization/bnb/test_mixed_int8.py b/tests/quantization/bnb/test_mixed_int8.py
index bb0702c00bd9..b15a9f72a8f6 100644
---
a/tests/quantization/bnb/test_mixed_int8.py
+++ b/tests/quantization/bnb/test_mixed_int8.py
@@ -46,11 +46,14 @@
     require_peft_version_greater,
     require_torch,
     require_torch_accelerator,
+    require_torch_version_greater_equal,
     require_transformers_version_greater,
     slow,
     torch_device,
 )
 
+from ..test_torch_compile_utils import QuantCompileTests
+
 
 def get_some_linear_layer(model):
     if model.__class__.__name__ in ["SD3Transformer2DModel", "FluxTransformer2DModel"]:
@@ -821,3 +824,27 @@ def test_serialization_sharded(self):
         out_0 = self.model_0(**inputs)[0]
         out_1 = model_1(**inputs)[0]
         self.assertTrue(torch.equal(out_0, out_1))
+
+
+@require_torch_version_greater_equal("2.6.0")
+class Bnb8BitCompileTests(QuantCompileTests):
+    quantization_config = PipelineQuantizationConfig(
+        quant_backend="bitsandbytes_8bit",
+        quant_kwargs={"load_in_8bit": True},
+        components_to_quantize=["transformer", "text_encoder_2"],
+    )
+
+    def test_torch_compile(self):
+        torch._dynamo.config.capture_dynamic_output_shape_ops = True
+        super()._test_torch_compile(quantization_config=self.quantization_config, torch_dtype=torch.float16)
+
+    def test_torch_compile_with_cpu_offload(self):
+        super()._test_torch_compile_with_cpu_offload(
+            quantization_config=self.quantization_config, torch_dtype=torch.float16
+        )
+
+    @pytest.mark.xfail(reason="Test fails because of an offloading problem from Accelerate with confusion in hooks.")
+    def test_torch_compile_with_group_offload(self):
+        super()._test_torch_compile_with_group_offload(
+            quantization_config=self.quantization_config, torch_dtype=torch.float16
+        )
diff --git a/tests/quantization/test_torch_compile_utils.py b/tests/quantization/test_torch_compile_utils.py
new file mode 100644
index 000000000000..1ae77b27d7cd
--- /dev/null
+++ b/tests/quantization/test_torch_compile_utils.py
@@ -0,0 +1,87 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Team Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import gc
+import unittest
+
+import torch
+
+from diffusers import DiffusionPipeline
+from diffusers.utils.testing_utils import backend_empty_cache, require_torch_gpu, slow, torch_device
+
+
+@require_torch_gpu
+@slow
+class QuantCompileTests(unittest.TestCase):
+    quantization_config = None
+
+    def setUp(self):
+        super().setUp()
+        gc.collect()
+        backend_empty_cache(torch_device)
+        torch.compiler.reset()
+
+    def tearDown(self):
+        super().tearDown()
+        gc.collect()
+        backend_empty_cache(torch_device)
+        torch.compiler.reset()
+
+    def _init_pipeline(self, quantization_config, torch_dtype):
+        pipe = DiffusionPipeline.from_pretrained(
+            "stabilityai/stable-diffusion-3-medium-diffusers",
+            quantization_config=quantization_config,
+            torch_dtype=torch_dtype,
+        )
+        return pipe
+
+    def _test_torch_compile(self, quantization_config, torch_dtype=torch.bfloat16):
+        pipe = self._init_pipeline(quantization_config, torch_dtype).to("cuda")
+        # important to ensure fullgraph True
+        pipe.transformer.compile(fullgraph=True)
+
+        for _ in range(2):
+            # small resolutions to ensure speedy execution.
+ pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256) + + def _test_torch_compile_with_cpu_offload(self, quantization_config, torch_dtype=torch.bfloat16): + pipe = self._init_pipeline(quantization_config, torch_dtype) + pipe.enable_model_cpu_offload() + pipe.transformer.compile() + + for _ in range(2): + # small resolutions to ensure speedy execution. + pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256) + + def _test_torch_compile_with_group_offload(self, quantization_config, torch_dtype=torch.bfloat16): + torch._dynamo.config.cache_size_limit = 10000 + + pipe = self._init_pipeline(quantization_config, torch_dtype) + group_offload_kwargs = { + "onload_device": torch.device("cuda"), + "offload_device": torch.device("cpu"), + "offload_type": "leaf_level", + "use_stream": True, + "non_blocking": True, + } + pipe.transformer.enable_group_offload(**group_offload_kwargs) + pipe.transformer.compile() + for name, component in pipe.components.items(): + if name != "transformer" and isinstance(component, torch.nn.Module): + if torch.device(component.device).type == "cpu": + component.to("cuda") + + for _ in range(2): + # small resolutions to ensure speedy execution. + pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256) From 91545666e05384f4d6161d90fa9c306bc70f937e Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 11 Jun 2025 22:41:59 +0530 Subject: [PATCH 18/89] [tests] model-level `device_map` clarifications (#11681) * add clarity in documentation for device_map * docs * fix how compiler tester mixins are used. * propagate * more * typo. * fix tests * fix order of decroators. * clarify more. * more test cases. * fix doc * fix device_map docstring in pipeline_utils. * more examples * more * update * remove code for stuff that is already supported. * fix stuff. --- src/diffusers/models/modeling_utils.py | 35 ++++++++++++++++-- src/diffusers/pipelines/pipeline_utils.py | 13 +++---- .../unets/test_models_unet_2d_condition.py | 37 +++++++++++++++++++ 3 files changed, 74 insertions(+), 11 deletions(-) diff --git a/src/diffusers/models/modeling_utils.py b/src/diffusers/models/modeling_utils.py index 55ce0cf79fb9..1e9e28471d89 100644 --- a/src/diffusers/models/modeling_utils.py +++ b/src/diffusers/models/modeling_utils.py @@ -814,14 +814,43 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P Mirror source to resolve accessibility issues if you're downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. - device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): + device_map (`Union[int, str, torch.device]` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Defaults to `None`, meaning that the model will be loaded on CPU. + Examples: + + ```py + >>> from diffusers import AutoModel + >>> import torch + + >>> # This works. + >>> model = AutoModel.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", device_map="cuda" + ... ) + >>> # This also works (integer accelerator device ID). + >>> model = AutoModel.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", device_map=0 + ... 
) + >>> # Specifying a supported offloading strategy like "auto" also works. + >>> model = AutoModel.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", device_map="auto" + ... ) + >>> # Specifying a dictionary as `device_map` also works. + >>> model = AutoModel.from_pretrained( + ... "stabilityai/stable-diffusion-xl-base-1.0", + ... subfolder="unet", + ... device_map={"": torch.device("cuda")}, + ... ) + ``` + Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For more information about each option see [designing a device - map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + map](https://huggingface.co/docs/accelerate/en/concept_guides/big_model_inference#the-devicemap). You + can also refer to the [Diffusers-specific + documentation](https://huggingface.co/docs/diffusers/main/en/training/distributed_inference#model-sharding) + for more concrete examples. max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. @@ -1387,7 +1416,7 @@ def _load_pretrained_model( low_cpu_mem_usage: bool = True, dtype: Optional[Union[str, torch.dtype]] = None, keep_in_fp32_modules: Optional[List[str]] = None, - device_map: Dict[str, Union[int, str, torch.device]] = None, + device_map: Union[str, int, torch.device, Dict[str, Union[int, str, torch.device]]] = None, offload_state_dict: Optional[bool] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, dduf_entries: Optional[Dict[str, DDUFEntry]] = None, diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 0ac4251ec6d3..efeb085a723b 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -669,14 +669,11 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. - device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): - A map that specifies where each submodule should go. It doesn’t need to be defined for each - parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the - same device. - - Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For - more information about each option see [designing a device - map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). + device_map (`str`, *optional*): + Strategy that dictates how the different components of a pipeline should be placed on available + devices. Currently, only "balanced" `device_map` is supported. Check out + [this](https://huggingface.co/docs/diffusers/main/en/tutorials/inference_with_big_models#device-placement) + to know more. max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. 
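For context, the distinction the two docstrings above draw can be seen side by side in a minimal sketch (the model ID is only illustrative and both calls assume at least one accelerator is available): model-level loading accepts a concrete device, while pipeline-level loading currently only accepts the "balanced" strategy.

```py
import torch
from diffusers import AutoModel, DiffusionPipeline

# Model-level: place the whole model on one concrete device
# (a string, integer device ID, torch.device, or dict all work).
unet = AutoModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", device_map="cuda"
)

# Pipeline-level: only the "balanced" strategy is supported, which distributes
# whole components across the available devices.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    device_map="balanced",
)
```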
diff --git a/tests/models/unets/test_models_unet_2d_condition.py b/tests/models/unets/test_models_unet_2d_condition.py index c8ed68c65b40..e0331d15dd04 100644 --- a/tests/models/unets/test_models_unet_2d_condition.py +++ b/tests/models/unets/test_models_unet_2d_condition.py @@ -46,6 +46,7 @@ require_peft_backend, require_torch_accelerator, require_torch_accelerator_with_fp16, + require_torch_gpu, skip_mps, slow, torch_all_close, @@ -1083,6 +1084,42 @@ def test_load_sharded_checkpoint_device_map_from_hub_local_subfolder(self): assert loaded_model assert new_output.sample.shape == (4, 4, 16, 16) + @parameterized.expand( + [ + (-1, "You can't pass device_map as a negative int"), + ("foo", "When passing device_map as a string, the value needs to be a device name"), + ] + ) + def test_wrong_device_map_raises_error(self, device_map, msg_substring): + with self.assertRaises(ValueError) as err_ctx: + _ = self.model_class.from_pretrained( + "hf-internal-testing/unet2d-sharded-dummy-subfolder", subfolder="unet", device_map=device_map + ) + + assert msg_substring in str(err_ctx.exception) + + @parameterized.expand([0, "cuda", torch.device("cuda"), torch.device("cuda:0")]) + @require_torch_gpu + def test_passing_non_dict_device_map_works(self, device_map): + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + loaded_model = self.model_class.from_pretrained( + "hf-internal-testing/unet2d-sharded-dummy-subfolder", subfolder="unet", device_map=device_map + ) + output = loaded_model(**inputs_dict) + assert output.sample.shape == (4, 4, 16, 16) + + @parameterized.expand([("", "cuda"), ("", torch.device("cuda"))]) + @require_torch_gpu + def test_passing_dict_device_map_works(self, name, device_map): + # There are other valid dict-based `device_map` values too. It's best to refer to + # the docs for those: https://huggingface.co/docs/accelerate/en/concept_guides/big_model_inference#the-devicemap. + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + loaded_model = self.model_class.from_pretrained( + "hf-internal-testing/unet2d-sharded-dummy-subfolder", subfolder="unet", device_map={name: device_map} + ) + output = loaded_model(**inputs_dict) + assert output.sample.shape == (4, 4, 16, 16) + @require_peft_backend def test_load_attn_procs_raise_warning(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() From f3e09114f200f8fbf88b060e218ac5a50464fe8d Mon Sep 17 00:00:00 2001 From: Aryan Date: Thu, 12 Jun 2025 01:18:40 +0530 Subject: [PATCH 19/89] Improve Wan docstrings (#11689) * improve docstrings for wan * Apply suggestions from code review Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * make style --------- Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> --- src/diffusers/pipelines/wan/pipeline_wan.py | 11 ++++--- .../pipelines/wan/pipeline_wan_i2v.py | 10 +++--- .../pipelines/wan/pipeline_wan_vace.py | 32 +++++++++++++++++-- .../pipelines/wan/pipeline_wan_video2video.py | 9 ++++-- 4 files changed, 46 insertions(+), 16 deletions(-) diff --git a/src/diffusers/pipelines/wan/pipeline_wan.py b/src/diffusers/pipelines/wan/pipeline_wan.py index 3c0ac30bb6dc..6df66118b068 100644 --- a/src/diffusers/pipelines/wan/pipeline_wan.py +++ b/src/diffusers/pipelines/wan/pipeline_wan.py @@ -388,8 +388,10 @@ def __call__( Args: prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. - instead. 
+ The prompt or prompts to guide the image generation. If not defined, pass `prompt_embeds` instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to avoid during image generation. If not defined, pass `negative_prompt_embeds` + instead. Ignored when not using guidance (`guidance_scale` < `1`). height (`int`, defaults to `480`): The height in pixels of the generated image. width (`int`, defaults to `832`): @@ -434,8 +436,9 @@ def __call__( The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. - autocast_dtype (`torch.dtype`, *optional*, defaults to `torch.bfloat16`): - The dtype to use for the torch.amp.autocast. + max_sequence_length (`int`, defaults to `512`): + The maximum sequence length of the text encoder. If the prompt is longer than this, it will be + truncated. If the prompt is shorter, it will be padded to this length. Examples: diff --git a/src/diffusers/pipelines/wan/pipeline_wan_i2v.py b/src/diffusers/pipelines/wan/pipeline_wan_i2v.py index 77f0e4d56a53..c71138a97dd9 100644 --- a/src/diffusers/pipelines/wan/pipeline_wan_i2v.py +++ b/src/diffusers/pipelines/wan/pipeline_wan_i2v.py @@ -562,12 +562,10 @@ def __call__( The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. - max_sequence_length (`int`, *optional*, defaults to `512`): - The maximum sequence length of the prompt. - shift (`float`, *optional*, defaults to `5.0`): - The shift of the flow. - autocast_dtype (`torch.dtype`, *optional*, defaults to `torch.bfloat16`): - The dtype to use for the torch.amp.autocast. + max_sequence_length (`int`, defaults to `512`): + The maximum sequence length of the text encoder. If the prompt is longer than this, it will be + truncated. If the prompt is shorter, it will be padded to this length. + Examples: Returns: diff --git a/src/diffusers/pipelines/wan/pipeline_wan_vace.py b/src/diffusers/pipelines/wan/pipeline_wan_vace.py index e029006aa175..a0b5ed93c9bf 100644 --- a/src/diffusers/pipelines/wan/pipeline_wan_vace.py +++ b/src/diffusers/pipelines/wan/pipeline_wan_vace.py @@ -687,8 +687,33 @@ def __call__( Args: prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + video (`List[PIL.Image.Image]`, *optional*): + The input video or videos to be used as a starting point for the generation. The video should be a list + of PIL images, a numpy array, or a torch tensor. Currently, the pipeline only supports generating one + video at a time. + mask (`List[PIL.Image.Image]`, *optional*): + The input mask defines which video regions to condition on and which to generate. Black areas in the + mask indicate conditioning regions, while white areas indicate regions for generation. 
The mask should + be a list of PIL images, a numpy array, or a torch tensor. Currently supports generating a single video + at a time. + reference_images (`List[PIL.Image.Image]`, *optional*): + A list of one or more reference images as extra conditioning for the generation. For example, if you + are trying to inpaint a video to change the character, you can pass reference images of the new + character here. Refer to the Diffusers [examples](https://github.com/huggingface/diffusers/pull/11582) + and original [user + guide](https://github.com/ali-vilab/VACE/blob/0897c6d055d7d9ea9e191dce763006664d9780f8/UserGuide.md) + for a full list of supported tasks and use cases. + conditioning_scale (`float`, `List[float]`, `torch.Tensor`, defaults to `1.0`): + The conditioning scale to be applied when adding the control conditioning latent stream to the + denoising latent stream in each control layer of the model. If a float is provided, it will be applied + uniformly to all layers. If a list or tensor is provided, it should have the same length as the number + of control layers in the model (`len(transformer.config.vace_layers)`). height (`int`, defaults to `480`): The height in pixels of the generated image. width (`int`, defaults to `832`): @@ -733,8 +758,9 @@ def __call__( The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. - autocast_dtype (`torch.dtype`, *optional*, defaults to `torch.bfloat16`): - The dtype to use for the torch.amp.autocast. + max_sequence_length (`int`, defaults to `512`): + The maximum sequence length of the text encoder. If the prompt is longer than this, it will be + truncated. If the prompt is shorter, it will be padded to this length. Examples: diff --git a/src/diffusers/pipelines/wan/pipeline_wan_video2video.py b/src/diffusers/pipelines/wan/pipeline_wan_video2video.py index a4a10d4655a9..1a2d2e9c2232 100644 --- a/src/diffusers/pipelines/wan/pipeline_wan_video2video.py +++ b/src/diffusers/pipelines/wan/pipeline_wan_video2video.py @@ -508,7 +508,7 @@ def __call__( Args: prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead. height (`int`, defaults to `480`): The height in pixels of the generated image. @@ -525,6 +525,8 @@ def __call__( of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. + strength (`float`, defaults to `0.8`): + Higher strength leads to more differences between original image and generated video. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): @@ -554,8 +556,9 @@ def __call__( The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. 
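The Wan docstring patches in this commit replace the stale `autocast_dtype` entry with documentation for `max_sequence_length`. A sketch of where that argument enters a text-to-video call follows; the checkpoint id and generation settings are illustrative assumptions, not part of the patch:

```py
# Illustrative only: shows `max_sequence_length` in a Wan text-to-video call.
import torch

from diffusers import WanPipeline
from diffusers.utils import export_to_video

pipe = WanPipeline.from_pretrained("Wan-AI/Wan2.1-T2V-1.3B-Diffusers", torch_dtype=torch.bfloat16)
pipe.to("cuda")

frames = pipe(
    prompt="A cat walks on the grass, realistic",
    negative_prompt="blurry, low quality",
    height=480,
    width=832,
    num_frames=81,
    guidance_scale=5.0,
    max_sequence_length=512,  # longer prompts are truncated, shorter ones padded
).frames[0]
export_to_video(frames, "wan_output.mp4", fps=15)
```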
- autocast_dtype (`torch.dtype`, *optional*, defaults to `torch.bfloat16`): - The dtype to use for the torch.amp.autocast. + max_sequence_length (`int`, defaults to `512`): + The maximum sequence length of the text encoder. If the prompt is longer than this, it will be + truncated. If the prompt is shorter, it will be padded to this length. Examples: From 447ccd0679f180ef1387bc9d990c4306a85eb226 Mon Sep 17 00:00:00 2001 From: rasmi Date: Wed, 11 Jun 2025 17:59:54 -0400 Subject: [PATCH 20/89] Set _torch_version to N/A if torch is disabled. (#11645) --- src/diffusers/utils/import_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/utils/import_utils.py b/src/diffusers/utils/import_utils.py index f7244e97b878..b64cecc412ca 100644 --- a/src/diffusers/utils/import_utils.py +++ b/src/diffusers/utils/import_utils.py @@ -99,6 +99,7 @@ def _is_package_available(pkg_name: str, get_dist_name: bool = False) -> Tuple[b else: logger.info("Disabling PyTorch because USE_TORCH is set") _torch_available = False + _torch_version = "N/A" _jax_version = "N/A" _flax_version = "N/A" From b272807bc898a314cde536c1d7d1e43592af1fce Mon Sep 17 00:00:00 2001 From: Joel Schlosser <75754324+jbschlosser@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:03:40 -0400 Subject: [PATCH 21/89] Avoid DtoH sync from access of nonzero() item in scheduler (#11696) --- src/diffusers/pipelines/flux/pipeline_flux.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/pipelines/flux/pipeline_flux.py b/src/diffusers/pipelines/flux/pipeline_flux.py index 6dbcd5c6db54..b9d0b9256109 100644 --- a/src/diffusers/pipelines/flux/pipeline_flux.py +++ b/src/diffusers/pipelines/flux/pipeline_flux.py @@ -898,6 +898,7 @@ def __call__( ) # 6. Denoising loop + self.scheduler.set_begin_index(0) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if self.interrupt: From 47ef79464f0bd9229b8f67956d12cae61e4a3b81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tolga=20Cang=C3=B6z?= <46008593+tolgacangoz@users.noreply.github.com> Date: Thu, 12 Jun 2025 02:47:37 +0300 Subject: [PATCH 22/89] Apply Occam's Razor in position embedding calculation (#11562) * fix: remove redundant indexing * style --- src/diffusers/models/embeddings.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index c25e9997e3fb..09e3621c2c7b 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -1149,9 +1149,7 @@ def get_1d_rotary_pos_embed( theta = theta * ntk_factor freqs = ( - 1.0 - / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype, device=pos.device)[: (dim // 2)] / dim)) - / linear_factor + 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype, device=pos.device) / dim)) / linear_factor ) # [D/2] freqs = torch.outer(pos, freqs) # type: ignore # [S, D/2] is_npu = freqs.device.type == "npu" From 7400278857cd1bac5af4572d45cdd0af9d0d4534 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:16:44 -0600 Subject: [PATCH 23/89] add chroma transformer to dummy tp --- src/diffusers/utils/dummy_pt_objects.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/diffusers/utils/dummy_pt_objects.py b/src/diffusers/utils/dummy_pt_objects.py index 24b3c3d7be59..200e15c7abc0 100644 --- a/src/diffusers/utils/dummy_pt_objects.py +++ b/src/diffusers/utils/dummy_pt_objects.py @@ -324,6 +324,20 @@ def from_config(cls, 
*args, **kwargs): def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) +class ChromaTransformer2DModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + class CogVideoXTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] From c22930d7ccdb5ff90099a4a9e2e34e0784e5410c Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:18:56 -0600 Subject: [PATCH 24/89] add chroma to init --- src/diffusers/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index ce0777fdef68..f660ab0521aa 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -159,6 +159,7 @@ "AutoencoderTiny", "AutoModel", "CacheMixin", + "ChromaTransformer2DModel", "CogVideoXTransformer3DModel", "CogView3PlusTransformer2DModel", "CogView4Transformer2DModel", From 4e698b1088c5ee5588692028803cba12baf4604b Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:21:10 -0600 Subject: [PATCH 25/89] add chroma to init --- src/diffusers/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index f660ab0521aa..2067e7d9d55c 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -354,6 +354,7 @@ "BlipDiffusionControlNetPipeline", "BlipDiffusionPipeline", "CLIPImageProjection", + "ChromaPipeline", "CogVideoXFunControlPipeline", "CogVideoXImageToVideoPipeline", "CogVideoXPipeline", @@ -769,6 +770,7 @@ AutoencoderTiny, AutoModel, CacheMixin, + ChromaTransformer2DModel, CogVideoXTransformer3DModel, CogView3PlusTransformer2DModel, CogView4Transformer2DModel, @@ -942,6 +944,7 @@ AudioLDMPipeline, AuraFlowPipeline, CLIPImageProjection, + ChromaPipeline, CogVideoXFunControlPipeline, CogVideoXImageToVideoPipeline, CogVideoXPipeline, From 5eb4b822aee0e9ebe10e96a29cb81ef641fe9502 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:38:58 -0600 Subject: [PATCH 26/89] fix single file --- src/diffusers/loaders/single_file_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/loaders/single_file_model.py b/src/diffusers/loaders/single_file_model.py index 82e4db7283cc..e07370130889 100644 --- a/src/diffusers/loaders/single_file_model.py +++ b/src/diffusers/loaders/single_file_model.py @@ -101,7 +101,7 @@ "ChromaTransformer2DModel": { "checkpoint_mapping_fn": convert_chroma_transformer_checkpoint_to_diffusers, "default_subfolder": "transformer", - } + }, "LTXVideoTransformer3DModel": { "checkpoint_mapping_fn": convert_ltx_transformer_checkpoint_to_diffusers, "default_subfolder": "transformer", From f0c75b6b6ffd6619afbb0b0cf625806cbd677766 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:46:51 -0600 Subject: [PATCH 27/89] update --- src/diffusers/models/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index 8723fbca2187..db8b5fc7eb7f 100755 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -60,6 +60,7 @@ 
_import_structure["embeddings"] = ["ImageProjection"] _import_structure["modeling_utils"] = ["ModelMixin"] _import_structure["transformers.auraflow_transformer_2d"] = ["AuraFlowTransformer2DModel"] + _import_structure["transformers.chroma_transformer_2d"] = ["ChromaTransformer2DModel"] _import_structure["transformers.cogvideox_transformer_3d"] = ["CogVideoXTransformer3DModel"] _import_structure["transformers.consisid_transformer_3d"] = ["ConsisIDTransformer3DModel"] _import_structure["transformers.dit_transformer_2d"] = ["DiTTransformer2DModel"] @@ -151,6 +152,7 @@ from .transformers import ( AllegroTransformer3DModel, AuraFlowTransformer2DModel, + ChromaTransformer2DModel, CogVideoXTransformer3DModel, CogView3PlusTransformer2DModel, CogView4Transformer2DModel, From 6441e70defff84b7855b83ad01010d369626586f Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:48:44 -0600 Subject: [PATCH 28/89] update --- src/diffusers/models/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py index db8b5fc7eb7f..b493d651f4ba 100755 --- a/src/diffusers/models/__init__.py +++ b/src/diffusers/models/__init__.py @@ -60,7 +60,6 @@ _import_structure["embeddings"] = ["ImageProjection"] _import_structure["modeling_utils"] = ["ModelMixin"] _import_structure["transformers.auraflow_transformer_2d"] = ["AuraFlowTransformer2DModel"] - _import_structure["transformers.chroma_transformer_2d"] = ["ChromaTransformer2DModel"] _import_structure["transformers.cogvideox_transformer_3d"] = ["CogVideoXTransformer3DModel"] _import_structure["transformers.consisid_transformer_3d"] = ["ConsisIDTransformer3DModel"] _import_structure["transformers.dit_transformer_2d"] = ["DiTTransformer2DModel"] @@ -75,6 +74,7 @@ _import_structure["transformers.t5_film_transformer"] = ["T5FilmDecoder"] _import_structure["transformers.transformer_2d"] = ["Transformer2DModel"] _import_structure["transformers.transformer_allegro"] = ["AllegroTransformer3DModel"] + _import_structure["transformers.transformer_chroma"] = ["ChromaTransformer2DModel"] _import_structure["transformers.transformer_cogview3plus"] = ["CogView3PlusTransformer2DModel"] _import_structure["transformers.transformer_cogview4"] = ["CogView4Transformer2DModel"] _import_structure["transformers.transformer_cosmos"] = ["CosmosTransformer3DModel"] From a6f231c7ce48e0200185056dcc86dca376a24ea3 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:51:45 -0600 Subject: [PATCH 29/89] add chroma to auto pipeline --- src/diffusers/pipelines/auto_pipeline.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index ed8ad79ca781..29aa321f5ca3 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -21,6 +21,7 @@ from ..models.controlnets import ControlNetUnionModel from ..utils import is_sentencepiece_available from .aura_flow import AuraFlowPipeline +from .chroma import ChromaPipeline from .cogview3 import CogView3PlusPipeline from .cogview4 import CogView4ControlPipeline, CogView4Pipeline from .controlnet import ( @@ -143,6 +144,7 @@ ("flux-controlnet", FluxControlNetPipeline), ("lumina", LuminaPipeline), ("lumina2", Lumina2Pipeline), + ("chroma", ChromaPipeline) ("cogview3", CogView3PlusPipeline), ("cogview4", CogView4Pipeline), ("cogview4-control", 
CogView4ControlPipeline), From 7445cf422aff613bb6745920795d4b6cdf7d69d6 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:53:06 -0600 Subject: [PATCH 30/89] add chroma to pipeline init --- src/diffusers/pipelines/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 268e5c2a8c39..d20d609ff9c4 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -148,6 +148,7 @@ "AudioLDM2UNet2DConditionModel", ] _import_structure["blip_diffusion"] = ["BlipDiffusionPipeline"] + _import_structure["chroma"] = ["ChromaPipeline"] _import_structure["cogvideo"] = [ "CogVideoXPipeline", "CogVideoXImageToVideoPipeline", From af918c89dd9fe3c3355ad3a0ad43fa505d3fccfa Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:55:03 -0600 Subject: [PATCH 31/89] change to chroma transformer --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index f6d2e366e48e..7ef191a54de4 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -28,7 +28,7 @@ from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FluxIPAdapterMixin, FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, FluxTransformer2DModel +from ...models import AutoencoderKL, ChromaTransformer2DModel from ...schedulers import FlowMatchEulerDiscreteScheduler from ...utils import ( USE_PEFT_BACKEND, From 2fcc75a6d89ab010789f20963c1b38b872801afd Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 18:55:56 -0600 Subject: [PATCH 32/89] take out variant from blocks --- src/diffusers/models/transformers/transformer_chroma.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/diffusers/models/transformers/transformer_chroma.py b/src/diffusers/models/transformers/transformer_chroma.py index 1f726f5cb4b0..7b46ef9c4376 100644 --- a/src/diffusers/models/transformers/transformer_chroma.py +++ b/src/diffusers/models/transformers/transformer_chroma.py @@ -290,7 +290,6 @@ def __init__( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, - variant=variant, ) for _ in range(num_layers) ] @@ -302,7 +301,6 @@ def __init__( dim=self.inner_dim, num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, - variant=variant, ) for _ in range(num_single_layers) ] From 0b027a24533890171b1536f2942bb662ca1466d4 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:04:52 -0600 Subject: [PATCH 33/89] swap embedder location --- src/diffusers/loaders/single_file_utils.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/diffusers/loaders/single_file_utils.py b/src/diffusers/loaders/single_file_utils.py index aace8fc7bffb..f406ba5ce7e4 100644 --- a/src/diffusers/loaders/single_file_utils.py +++ b/src/diffusers/loaders/single_file_utils.py @@ -3333,20 +3333,20 @@ def swap_scale_shift(weight): return new_weight # guidance - converted_state_dict["time_text_embed.embedder.in_proj.bias"] = checkpoint.pop( + 
converted_state_dict["distilled_guidance_layer.in_proj.bias"] = checkpoint.pop( "distilled_guidance_layer.in_proj.bias" ) - converted_state_dict["time_text_embed.embedder.in_proj.weight"] = checkpoint.pop( + converted_state_dict["distilled_guidance_layer.in_proj.weight"] = checkpoint.pop( "distilled_guidance_layer.in_proj.weight" ) - converted_state_dict["time_text_embed.embedder.out_proj.bias"] = checkpoint.pop( + converted_state_dict["distilled_guidance_layer.out_proj.bias"] = checkpoint.pop( "distilled_guidance_layer.out_proj.bias" ) - converted_state_dict["time_text_embed.embedder.out_proj.weight"] = checkpoint.pop( + converted_state_dict["distilled_guidance_layer.out_proj.weight"] = checkpoint.pop( "distilled_guidance_layer.out_proj.weight" ) for i in range(num_guidance_layers): - block_prefix = f"time_text_embed.embedder.layers.{i}." + block_prefix = f"distilled_guidance_layer.layers.{i}." converted_state_dict[f"{block_prefix}linear_1.bias"] = checkpoint.pop( f"distilled_guidance_layer.layers.{i}.in_layer.bias" ) @@ -3359,7 +3359,7 @@ def swap_scale_shift(weight): converted_state_dict[f"{block_prefix}linear_2.weight"] = checkpoint.pop( f"distilled_guidance_layer.layers.{i}.out_layer.weight" ) - converted_state_dict[f"time_text_embed.embedder.norms.{i}.weight"] = checkpoint.pop( + converted_state_dict[f"distilled_guidance_layer.norms.{i}.weight"] = checkpoint.pop( f"distilled_guidance_layer.norms.{i}.scale" ) From 6c0aed14dbaab0fc76c7d90e2ae382c3dab18fe9 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:06:45 -0600 Subject: [PATCH 34/89] remove prompt_2 --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 7ef191a54de4..2c5f7988534c 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -308,7 +308,7 @@ def encode_prompt( if prompt_embeds is None: prompt_embeds = self._get_t5_prompt_embeds( - prompt=prompt_2, + prompt=prompt, num_images_per_prompt=num_images_per_prompt, max_sequence_length=max_sequence_length, device=device, @@ -377,7 +377,6 @@ def check_inputs( height, width, negative_prompt=None, - negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, From f190c02af71b9dfbfa64bff2921d47b5b76220a0 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:09:37 -0600 Subject: [PATCH 35/89] work on swapping text encoders --- .../pipelines/chroma/pipeline_chroma.py | 24 +++++-------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 2c5f7988534c..88b435fb2917 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -19,8 +19,6 @@ import torch from transformers import ( CLIPImageProcessor, - CLIPTextModel, - CLIPTokenizer, CLIPVisionModelWithProjection, T5EncoderModel, T5TokenizerFast, @@ -168,7 +166,7 @@ class ChromaPipeline( [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast). 
""" - model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->transformer->vae" + model_cpu_offload_seq = "text_encoder->image_encoder->transformer->vae" _optional_components = ["image_encoder", "feature_extractor"] _callback_tensor_inputs = ["latents", "prompt_embeds"] @@ -198,9 +196,6 @@ def __init__( # Flux latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible # by the patch size. So the vae scale factor is multiplied by the patch size to account for this self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) - self.tokenizer_max_length = ( - self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 - ) self.default_sample_size = 128 def _get_chroma_attn_mask(self, length: torch.Tensor, max_sequence_length: int) -> torch.Tensor: @@ -225,9 +220,9 @@ def _get_t5_prompt_embeds( batch_size = len(prompt) if isinstance(self, TextualInversionLoaderMixin): - prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2) + prompt = self.maybe_convert_prompt(prompt, self.tokenizer) - text_inputs = self.tokenizer_2( + text_inputs = self.tokenizer( prompt, padding="max_length", max_length=max_sequence_length, @@ -237,16 +232,9 @@ def _get_t5_prompt_embeds( return_tensors="pt", ) text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because `max_sequence_length` is set to " - f" {max_sequence_length} tokens: {removed_text}" - ) + untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - prompt_embeds = self.text_encoder_2( + prompt_embeds = self.text_encoder( text_input_ids.to(device), output_hidden_states=False, attention_mask=( @@ -254,7 +242,7 @@ def _get_t5_prompt_embeds( ), )[0] - dtype = self.text_encoder_2.dtype + dtype = self.text_encoder.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) _, seq_len, _ = prompt_embeds.shape From 38429ffcaccb49632c4f32804ab75082e78c2bc3 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:11:47 -0600 Subject: [PATCH 36/89] remove mask function --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 88b435fb2917..09883f54c7b1 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -198,13 +198,6 @@ def __init__( self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) self.default_sample_size = 128 - def _get_chroma_attn_mask(self, length: torch.Tensor, max_sequence_length: int) -> torch.Tensor: - attention_mask = torch.zeros((length.shape[0], max_sequence_length), dtype=torch.bool, device=length.device) - for i, n_tokens in enumerate(length): - n_tokens = torch.max(n_tokens + 1, max_sequence_length) - attention_mask[i, :n_tokens] = True - return attention_mask - def _get_t5_prompt_embeds( self, prompt: Union[str, List[str]] = None, @@ -234,12 +227,12 @@ def _get_t5_prompt_embeds( 
text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids + text_inputs.attention_mask[:, : text_inputs.length + 1] = 1.0 + prompt_embeds = self.text_encoder( text_input_ids.to(device), output_hidden_states=False, - attention_mask=( - self._get_chroma_attn_mask(text_inputs.length, max_sequence_length).to(device) - ), + attention_mask=text_inputs.attention_mask.to(device), )[0] dtype = self.text_encoder.dtype From 7c75d8e98d88816f2a2d76d542b2814ec446f0dc Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:15:18 -0600 Subject: [PATCH 37/89] dont modify mask (for now) --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 09883f54c7b1..1ddce5fb717b 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -227,7 +227,7 @@ def _get_t5_prompt_embeds( text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - text_inputs.attention_mask[:, : text_inputs.length + 1] = 1.0 + #text_inputs.attention_mask[:, : text_inputs.length + 1] = 1.0 prompt_embeds = self.text_encoder( text_input_ids.to(device), From c9b46af65f4cd51bf5c32cb2795bd5069b1a61a6 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:16:24 -0600 Subject: [PATCH 38/89] wrap attn mask --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 1ddce5fb717b..62f601c0dc9c 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -227,12 +227,12 @@ def _get_t5_prompt_embeds( text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - #text_inputs.attention_mask[:, : text_inputs.length + 1] = 1.0 + text_inputs.attention_mask[:, : text_inputs.length + 1] = 1.0 prompt_embeds = self.text_encoder( text_input_ids.to(device), output_hidden_states=False, - attention_mask=text_inputs.attention_mask.to(device), + attention_mask=(text_inputs.attention_mask.to(device),), )[0] dtype = self.text_encoder.dtype From 146255aba134360d4d11357d2711a205402528b1 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:17:29 -0600 Subject: [PATCH 39/89] no attn mask (can't get it to work) --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 62f601c0dc9c..04c05372c488 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -227,12 +227,12 @@ def _get_t5_prompt_embeds( text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - text_inputs.attention_mask[:, : text_inputs.length + 1] = 1.0 + #text_inputs.attention_mask[:, : text_inputs.length + 1] = 1.0 prompt_embeds = self.text_encoder( 
text_input_ids.to(device), output_hidden_states=False, - attention_mask=(text_inputs.attention_mask.to(device),), + #attention_mask=(text_inputs.attention_mask.to(device),), )[0] dtype = self.text_encoder.dtype From 3309ffef1ce43d4c74ff1beba7da97c1fd4c0a1b Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:33:17 -0600 Subject: [PATCH 40/89] remove pooled prompt embeds --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 04c05372c488..32135d2c21fe 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -788,7 +788,6 @@ def __call__( hidden_states=latents, timestep=timestep / 1000, guidance=guidance, - pooled_projections=negative_pooled_prompt_embeds, encoder_hidden_states=negative_prompt_embeds, txt_ids=negative_text_ids, img_ids=latent_image_ids, From 77b429eda416f0f6645b591b370971913f6bdbf5 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:35:10 -0600 Subject: [PATCH 41/89] change to my own unpooled embeddeer --- src/diffusers/models/embeddings.py | 32 ++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index 8aa2ea5841e9..0ba64eadf2c1 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -1636,36 +1636,46 @@ def forward(self, timestep, guidance, pooled_projection): return conditioning - class CombinedTimestepTextProjChromaEmbeddings(nn.Module): def __init__(self, factor: int, hidden_dim: int, out_dim: int, n_layers: int, embedding_dim: int): super().__init__() self.time_proj = Timesteps(num_channels=factor, flip_sin_to_cos=True, downscale_freq_shift=0) self.guidance_proj = Timesteps(num_channels=factor, flip_sin_to_cos=True, downscale_freq_shift=0) + self.embedder = ChromaApproximator( + in_dim=factor * 4, + out_dim=out_dim, + hidden_dim=hidden_dim, + n_layers=n_layers, + ) + self.embedding_dim = embedding_dim self.register_buffer( "mod_proj", - get_timestep_embedding(torch.arange(out_dim)*1000, 2 * factor, flip_sin_to_cos=True, downscale_freq_shift=0, ), + get_timestep_embedding(torch.arange(344), 2 * factor, flip_sin_to_cos=True, downscale_freq_shift=0), persistent=False, ) def forward( - self, timestep: torch.Tensor, guidance: Optional[torch.Tensor], pooled_projections: torch.Tensor + self, timestep: torch.Tensor, guidance: Optional[torch.Tensor] ) -> torch.Tensor: mod_index_length = self.mod_proj.shape[0] - timesteps_proj = self.time_proj(timestep).to(dtype=timestep.dtype) - guidance_proj = self.guidance_proj(torch.tensor([0])).to(dtype=timestep.dtype, device=timestep.device) - - mod_proj = self.mod_proj.to(dtype=timesteps_proj.dtype, device=timesteps_proj.device) + timesteps_proj = self.time_proj(timestep) + if guidance is not None: + guidance_proj = self.guidance_proj(guidance.repeat(timesteps_proj.shape[0])) + else: + guidance_proj = torch.zeros( + (1, self.guidance_proj.num_channels), + dtype=timesteps_proj.dtype, + device=timesteps_proj.device, + ) + mod_proj = self.mod_proj.unsqueeze(0).repeat(timesteps_proj.shape[0], 1, 1).to(dtype=timesteps_proj.dtype, device=timesteps_proj.device) timestep_guidance = ( - torch.cat([timesteps_proj, guidance_proj], dim=1).unsqueeze(1).repeat(1, 
mod_index_length, 1) + torch.cat([timesteps_proj, guidance_proj], dim=1).repeat(1, mod_index_length, 1) ) - input_vec = torch.cat([timestep_guidance, mod_proj.unsqueeze(0)], dim=-1) - + input_vec = torch.cat([timestep_guidance, mod_proj], dim=-1) return input_vec - class CogView3CombinedTimestepSizeEmbeddings(nn.Module): def __init__(self, embedding_dim: int, condition_dim: int, pooled_projection_dim: int, timesteps_dim: int = 256): super().__init__() From df7fde7a6d32b03a8ad77d337e6a2125edf4e9c8 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:36:34 -0600 Subject: [PATCH 42/89] fix load --- src/diffusers/models/embeddings.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index 0ba64eadf2c1..8a89a5d1366a 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -1642,17 +1642,10 @@ def __init__(self, factor: int, hidden_dim: int, out_dim: int, n_layers: int, em self.time_proj = Timesteps(num_channels=factor, flip_sin_to_cos=True, downscale_freq_shift=0) self.guidance_proj = Timesteps(num_channels=factor, flip_sin_to_cos=True, downscale_freq_shift=0) - self.embedder = ChromaApproximator( - in_dim=factor * 4, - out_dim=out_dim, - hidden_dim=hidden_dim, - n_layers=n_layers, - ) - self.embedding_dim = embedding_dim self.register_buffer( "mod_proj", - get_timestep_embedding(torch.arange(344), 2 * factor, flip_sin_to_cos=True, downscale_freq_shift=0), + get_timestep_embedding(torch.arange(out_dim)*1000, 2 * factor, flip_sin_to_cos=True, downscale_freq_shift=0), persistent=False, ) From 68f771bf43cc4732ddbb714341242f2ac37ce983 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:38:38 -0600 Subject: [PATCH 43/89] take pooled projections out of transformer --- src/diffusers/models/transformers/transformer_chroma.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/diffusers/models/transformers/transformer_chroma.py b/src/diffusers/models/transformers/transformer_chroma.py index 7b46ef9c4376..72cde1f60b67 100644 --- a/src/diffusers/models/transformers/transformer_chroma.py +++ b/src/diffusers/models/transformers/transformer_chroma.py @@ -236,8 +236,6 @@ class ChromaTransformer2DModel( joint_attention_dim (`int`, defaults to `4096`): The number of dimensions to use for the joint attention (embedding/channel dimension of `encoder_hidden_states`). - pooled_projection_dim (`int`, defaults to `768`): - The number of dimensions to use for the pooled projection. guidance_embeds (`bool`, defaults to `False`): Whether to use guidance embeddings for guidance-distilled variant of the model. axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`): @@ -259,7 +257,6 @@ def __init__( attention_head_dim: int = 128, num_attention_heads: int = 24, joint_attention_dim: int = 4096, - pooled_projection_dim: int = 768, guidance_embeds: bool = False, axes_dims_rope: Tuple[int, ...] = (16, 56, 56), approximator_in_factor: int = 16, @@ -416,7 +413,6 @@ def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor = None, - pooled_projections: torch.Tensor = None, timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, @@ -435,8 +431,6 @@ def forward( Input `hidden_states`. 
encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`): Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. - pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected - from the embeddings of input conditions. timestep ( `torch.LongTensor`): Used to indicate denoising step. block_controlnet_hidden_states: (`list` of `torch.Tensor`): @@ -474,7 +468,7 @@ def forward( if guidance is not None: guidance = guidance.to(hidden_states.dtype) * 1000 - input_vec = self.time_text_embed(timestep, guidance, pooled_projections) + input_vec = self.time_text_embed(timestep, guidance) pooled_temb = self.distilled_guidance_layer(input_vec) encoder_hidden_states = self.context_embedder(encoder_hidden_states) From f783f38883f6f9c04c6ccb0a5bb630cc76c07e98 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:52:43 -0600 Subject: [PATCH 44/89] ensure correct dtype for chroma embeddings --- src/diffusers/models/embeddings.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index a31999267506..dc39480b6506 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -1665,6 +1665,7 @@ def forward( torch.cat([timesteps_proj, guidance_proj], dim=1).repeat(1, mod_index_length, 1) ) input_vec = torch.cat([timestep_guidance, mod_proj], dim=-1) + input_vec.to(dtype=timestep.dtype) return input_vec class CogView3CombinedTimestepSizeEmbeddings(nn.Module): From f6de1afc3febd680b41ba4b16d643cb3b897c091 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:54:27 -0600 Subject: [PATCH 45/89] update --- src/diffusers/models/embeddings.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index dc39480b6506..8d3f7cbbe378 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -1665,8 +1665,7 @@ def forward( torch.cat([timesteps_proj, guidance_proj], dim=1).repeat(1, mod_index_length, 1) ) input_vec = torch.cat([timestep_guidance, mod_proj], dim=-1) - input_vec.to(dtype=timestep.dtype) - return input_vec + return input_vec.to(dtype=timestep.dtype) class CogView3CombinedTimestepSizeEmbeddings(nn.Module): def __init__(self, embedding_dim: int, condition_dim: int, pooled_projection_dim: int, timesteps_dim: int = 256): From ab7942174ad9debd5f3a41b1df54c1868e863e75 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:57:31 -0600 Subject: [PATCH 46/89] use dn6 attn mask + fix true_cfg_scale --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 32135d2c21fe..de7e5deb201e 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -232,9 +232,14 @@ def _get_t5_prompt_embeds( prompt_embeds = self.text_encoder( text_input_ids.to(device), output_hidden_states=False, - #attention_mask=(text_inputs.attention_mask.to(device),), + attention_mask=text_inputs.attention_mask.to(device), )[0] + max_len = min(text_inputs.attention_mask.sum() + 1, max_sequence_length) + 
prompt_embeds = prompt_embeds[ + :, :max_len + ] + dtype = self.text_encoder.dtype prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) @@ -554,7 +559,7 @@ def __call__( instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is not greater than `1`). height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. @@ -794,7 +799,7 @@ def __call__( joint_attention_kwargs=self.joint_attention_kwargs, return_dict=False, )[0] - noise_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred) + noise_pred = neg_noise_pred + guidance_scale * (noise_pred - neg_noise_pred) # compute the previous noisy sample x_t -> x_t-1 latents_dtype = latents.dtype From 442f77a2d7fc12f67310763b8e157d5751617205 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 19:59:43 -0600 Subject: [PATCH 47/89] use chroma pipeline output --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index de7e5deb201e..7a2fc90841b2 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -609,7 +609,7 @@ def __call__( The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. + Whether or not to return a [`~pipelines.flux.ChromaPipelineOutput`] instead of a plain tuple. 
joint_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in @@ -842,4 +842,4 @@ def __call__( if not return_dict: return (image,) - return FluxPipelineOutput(images=image) + return ChromaPipelineOutput(images=image) From e69d73099d0572748f0f078d7c97f94ff5fb5a6c Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 20:05:28 -0600 Subject: [PATCH 48/89] use DN6 embeddings --- src/diffusers/models/embeddings.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index 8d3f7cbbe378..adb00b247560 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -1651,20 +1651,15 @@ def forward( self, timestep: torch.Tensor, guidance: Optional[torch.Tensor] ) -> torch.Tensor: mod_index_length = self.mod_proj.shape[0] - timesteps_proj = self.time_proj(timestep) - if guidance is not None: - guidance_proj = self.guidance_proj(guidance.repeat(timesteps_proj.shape[0])) - else: - guidance_proj = torch.zeros( - (1, self.guidance_proj.num_channels), - dtype=timesteps_proj.dtype, - device=timesteps_proj.device, - ) - mod_proj = self.mod_proj.unsqueeze(0).repeat(timesteps_proj.shape[0], 1, 1).to(dtype=timesteps_proj.dtype, device=timesteps_proj.device) + + timesteps_proj = self.time_proj(timestep).to(dtype=timestep.dtype) + guidance_proj = self.guidance_proj(torch.tensor([0])).to(dtype=timestep.dtype, device=timestep.device) + + mod_proj = self.mod_proj.to(dtype=timesteps_proj.dtype, device=timesteps_proj.device) timestep_guidance = ( - torch.cat([timesteps_proj, guidance_proj], dim=1).repeat(1, mod_index_length, 1) + torch.cat([timesteps_proj, guidance_proj], dim=1).unsqueeze(1).repeat(1, mod_index_length, 1) ) - input_vec = torch.cat([timestep_guidance, mod_proj], dim=-1) + input_vec = torch.cat([timestep_guidance, mod_proj.unsqueeze(0)], dim=-1) return input_vec.to(dtype=timestep.dtype) class CogView3CombinedTimestepSizeEmbeddings(nn.Module): From 01bc0dcc56b93d3df77a220920a2df037df15701 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 20:45:45 -0600 Subject: [PATCH 49/89] remove guidance --- src/diffusers/models/transformers/transformer_chroma.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/diffusers/models/transformers/transformer_chroma.py b/src/diffusers/models/transformers/transformer_chroma.py index 72cde1f60b67..fd5b01d1ee53 100644 --- a/src/diffusers/models/transformers/transformer_chroma.py +++ b/src/diffusers/models/transformers/transformer_chroma.py @@ -416,7 +416,6 @@ def forward( timestep: torch.LongTensor = None, img_ids: torch.Tensor = None, txt_ids: torch.Tensor = None, - guidance: torch.Tensor = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_block_samples=None, controlnet_single_block_samples=None, @@ -465,10 +464,8 @@ def forward( hidden_states = self.x_embedder(hidden_states) timestep = timestep.to(hidden_states.dtype) * 1000 - if guidance is not None: - guidance = guidance.to(hidden_states.dtype) * 1000 - input_vec = self.time_text_embed(timestep, guidance) + input_vec = self.time_text_embed(timestep) pooled_temb = self.distilled_guidance_layer(input_vec) encoder_hidden_states = self.context_embedder(encoder_hidden_states) From e31c94866d9c56433184f1ef906218b220f12b10 Mon Sep 17 
00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 20:46:59 -0600 Subject: [PATCH 50/89] remove guidance embed (pipeline) --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index 7a2fc90841b2..e2081405c05e 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -724,13 +724,6 @@ def __call__( num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) self._num_timesteps = len(timesteps) - # handle guidance - if self.transformer.config.guidance_embeds: - guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) - guidance = guidance.expand(latents.shape[0]) - else: - guidance = None - if (ip_adapter_image is not None or ip_adapter_image_embeds is not None) and ( negative_ip_adapter_image is None and negative_ip_adapter_image_embeds is None ): @@ -778,7 +771,6 @@ def __call__( noise_pred = self.transformer( hidden_states=latents, timestep=timestep / 1000, - guidance=guidance, encoder_hidden_states=prompt_embeds, txt_ids=text_ids, img_ids=latent_image_ids, @@ -792,7 +784,6 @@ def __call__( neg_noise_pred = self.transformer( hidden_states=latents, timestep=timestep / 1000, - guidance=guidance, encoder_hidden_states=negative_prompt_embeds, txt_ids=negative_text_ids, img_ids=latent_image_ids, From 406ab3b1e9696fbcd723658b45a5e2010109ddd5 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 20:47:59 -0600 Subject: [PATCH 51/89] remove guidance from embeddings --- src/diffusers/models/embeddings.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py index adb00b247560..01a8f316be1e 100644 --- a/src/diffusers/models/embeddings.py +++ b/src/diffusers/models/embeddings.py @@ -1647,9 +1647,7 @@ def __init__(self, factor: int, hidden_dim: int, out_dim: int, n_layers: int, em persistent=False, ) - def forward( - self, timestep: torch.Tensor, guidance: Optional[torch.Tensor] - ) -> torch.Tensor: + def forward(self, timestep: torch.Tensor) -> torch.Tensor: mod_index_length = self.mod_proj.shape[0] timesteps_proj = self.time_proj(timestep).to(dtype=timestep.dtype) From 1bd8fdfcb6e43622a04e9477afd7cd7cfae4e441 Mon Sep 17 00:00:00 2001 From: Edna <88869424+Ednaordinary@users.noreply.github.com> Date: Wed, 11 Jun 2025 20:56:27 -0600 Subject: [PATCH 52/89] don't return length --- src/diffusers/pipelines/chroma/pipeline_chroma.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py index e2081405c05e..e376a402e52b 100644 --- a/src/diffusers/pipelines/chroma/pipeline_chroma.py +++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py @@ -220,14 +220,12 @@ def _get_t5_prompt_embeds( padding="max_length", max_length=max_sequence_length, truncation=True, - return_length=True, + return_length=False, return_overflowing_tokens=False, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - #text_inputs.attention_mask[:, : text_inputs.length + 1] = 1.0 prompt_embeds = self.text_encoder( text_input_ids.to(device), From 
00b179fb1afc147f87bd311f03b1ef7d747e1792 Mon Sep 17 00:00:00 2001
From: Sayak Paul
Date: Thu, 12 Jun 2025 08:49:24 +0530
Subject: [PATCH 53/89] [docs] add compilation bits to the bitsandbytes docs. (#11693)

* add compilation bits to the bitsandbytes docs.

* Apply suggestions from code review

Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>

* finish

---------

Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>
---
 docs/source/en/quantization/bitsandbytes.md | 39 +++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/docs/source/en/quantization/bitsandbytes.md b/docs/source/en/quantization/bitsandbytes.md
index b1c130b792c3..dc095054e1d8 100644
--- a/docs/source/en/quantization/bitsandbytes.md
+++ b/docs/source/en/quantization/bitsandbytes.md
@@ -416,6 +416,45 @@ text_encoder_2_4bit.dequantize()
 transformer_4bit.dequantize()
 ```
 
+## torch.compile
+
+Speed up inference with `torch.compile`. Make sure the latest `bitsandbytes` is installed; installing [PyTorch nightly](https://pytorch.org/get-started/locally/) is also recommended.
+
+```py
+torch._dynamo.config.capture_dynamic_output_shape_ops = True
+
+quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
+transformer_8bit = AutoModel.from_pretrained(
+    "black-forest-labs/FLUX.1-dev",
+    subfolder="transformer",
+    quantization_config=quant_config,
+    torch_dtype=torch.float16,
+)
+transformer_8bit.compile(fullgraph=True)
+```
+
+```py
+quant_config = DiffusersBitsAndBytesConfig(load_in_4bit=True)
+transformer_4bit = AutoModel.from_pretrained(
+    "black-forest-labs/FLUX.1-dev",
+    subfolder="transformer",
+    quantization_config=quant_config,
+    torch_dtype=torch.float16,
+)
+transformer_4bit.compile(fullgraph=True)
+```
+
+On an RTX 4090 with compilation, 4-bit Flux generation completed in 25.809 seconds versus 32.570 seconds without.
+
+Check out the [benchmarking script](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d) for more details.
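A rough way to reproduce such a timing comparison, reusing the compiled `transformer_4bit` from the 4-bit snippet above; the prompt, step count, warmup, and CUDA-event timing below are illustrative assumptions rather than the benchmarking script's exact method:

```py
# Timing sketch for the compiled 4-bit pipeline; assumes `transformer_4bit`
# from the snippet above is in scope.
import torch

from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    transformer=transformer_4bit,
    torch_dtype=torch.float16,
    device_map="balanced",
)

_ = pipe("warmup", num_inference_steps=2)  # trigger compilation before timing

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
_ = pipe("a photo of a dog", num_inference_steps=28)
end.record()
torch.cuda.synchronize()
print(f"latency: {start.elapsed_time(end) / 1000:.3f} s")
```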
+
 ## Resources
 
 * [End-to-end notebook showing Flux.1 Dev inference in a free-tier Colab](https://gist.github.com/sayakpaul/c76bd845b48759e11687ac550b99d8b4)

From 3e2452ded0ce07306dae684b8b74549bd30ca6dd Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Wed, 11 Jun 2025 21:23:35 -0600
Subject: [PATCH 54/89] don't change dtype

---
 src/diffusers/models/embeddings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py
index 01a8f316be1e..0708f93299ab 100644
--- a/src/diffusers/models/embeddings.py
+++ b/src/diffusers/models/embeddings.py
@@ -1658,7 +1658,7 @@ def forward(self, timestep: torch.Tensor) -> torch.Tensor:
             torch.cat([timesteps_proj, guidance_proj], dim=1).unsqueeze(1).repeat(1, mod_index_length, 1)
         )
         input_vec = torch.cat([timestep_guidance, mod_proj.unsqueeze(0)], dim=-1)
-        return input_vec.to(dtype=timestep.dtype)
+        return input_vec
 
 class CogView3CombinedTimestepSizeEmbeddings(nn.Module):
     def __init__(self, embedding_dim: int, condition_dim: int, pooled_projection_dim: int, timesteps_dim: int = 256):

From 1efa772f696c1e2d7026110c17e25306224726b0 Mon Sep 17 00:00:00 2001
From: BuildTools
Date: Wed, 11 Jun 2025 21:46:40 -0600
Subject: [PATCH 55/89] remove unused stuff, fix up docs

---
 src/diffusers/models/transformers/transformer_chroma.py | 5 -----
 src/diffusers/pipelines/chroma/pipeline_chroma.py       | 4 ----
 2 files changed, 9 deletions(-)

diff --git a/src/diffusers/models/transformers/transformer_chroma.py b/src/diffusers/models/transformers/transformer_chroma.py
index fd5b01d1ee53..65ff7ac14763 100644
--- a/src/diffusers/models/transformers/transformer_chroma.py
+++ b/src/diffusers/models/transformers/transformer_chroma.py
@@ -34,9 +34,7 @@
 )
 from ..cache_utils import CacheMixin
 from ..embeddings import (
-    CombinedTimestepGuidanceTextProjEmbeddings,
     CombinedTimestepTextProjChromaEmbeddings,
-    CombinedTimestepTextProjEmbeddings,
     ChromaApproximator,
     FluxPosEmbed,
 )
@@ -236,8 +234,6 @@ class ChromaTransformer2DModel(
         joint_attention_dim (`int`, defaults to `4096`):
             The number of dimensions to use for the joint attention (embedding/channel dimension of
             `encoder_hidden_states`).
-        guidance_embeds (`bool`, defaults to `False`):
-            Whether to use guidance embeddings for guidance-distilled variant of the model.
         axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`):
             The dimensions to use for the rotary positional embeddings.
     """
@@ -257,7 +253,6 @@ def __init__(
         attention_head_dim: int = 128,
         num_attention_heads: int = 24,
         joint_attention_dim: int = 4096,
-        guidance_embeds: bool = False,
         axes_dims_rope: Tuple[int, ...] = (16, 56, 56),
         approximator_in_factor: int = 16,
         approximator_hidden_dim: int = 5120,
diff --git a/src/diffusers/pipelines/chroma/pipeline_chroma.py b/src/diffusers/pipelines/chroma/pipeline_chroma.py
index e376a402e52b..d0aabed2a9e1 100644
--- a/src/diffusers/pipelines/chroma/pipeline_chroma.py
+++ b/src/diffusers/pipelines/chroma/pipeline_chroma.py
@@ -225,7 +225,6 @@ def _get_t5_prompt_embeds(
             return_tensors="pt",
         )
         text_input_ids = text_inputs.input_ids
-        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
 
         prompt_embeds = self.text_encoder(
             text_input_ids.to(device),
@@ -270,9 +269,6 @@ def encode_prompt(
             prompt_embeds (`torch.FloatTensor`, *optional*):
                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, text embeddings will be generated from `prompt` input argument.
-            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
-                If not provided, pooled text embeddings will be generated from `prompt` input argument.
             lora_scale (`float`, *optional*):
                 A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
         """

From 619921ca22602577b09c69279b939ace00551264 Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Wed, 11 Jun 2025 21:53:27 -0600
Subject: [PATCH 56/89] add chroma autodoc

---
 docs/source/en/api/models/chroma_transformer | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
 create mode 100644 docs/source/en/api/models/chroma_transformer

diff --git a/docs/source/en/api/models/chroma_transformer b/docs/source/en/api/models/chroma_transformer
new file mode 100644
index 000000000000..f8ee50165c64
--- /dev/null
+++ b/docs/source/en/api/models/chroma_transformer
@@ -0,0 +1,19 @@
+<!--Copyright 2025 The HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License. -->
+
+# ChromaTransformer2DModel
+
+A modified Flux Transformer model from [Chroma](https://huggingface.co/lodestones/Chroma).
+
+## ChromaTransformer2DModel
+
+[[autodoc]] ChromaTransformer2DModel

From f821f2ad5ef544955271ee406d8b0ca8bf9d169e Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Wed, 11 Jun 2025 21:54:43 -0600
Subject: [PATCH 57/89] add .md (oops)

---
 .../en/api/models/{chroma_transformer => chroma_transformer.md} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename docs/source/en/api/models/{chroma_transformer => chroma_transformer.md} (100%)

diff --git a/docs/source/en/api/models/chroma_transformer b/docs/source/en/api/models/chroma_transformer.md
similarity index 100%
rename from docs/source/en/api/models/chroma_transformer
rename to docs/source/en/api/models/chroma_transformer.md

From b0cf6803a74a5f96efd3c83430c40263df0a5f3a Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Wed, 11 Jun 2025 22:07:21 -0600
Subject: [PATCH 58/89] initial chroma docs

---
 docs/source/en/api/pipelines/chroma.md | 90 ++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)
 create mode 100644 docs/source/en/api/pipelines/chroma.md

diff --git a/docs/source/en/api/pipelines/chroma.md b/docs/source/en/api/pipelines/chroma.md
new file mode 100644
index 000000000000..d11bcfabdc99
--- /dev/null
+++ b/docs/source/en/api/pipelines/chroma.md
@@ -0,0 +1,90 @@
+<!--Copyright 2025 The HuggingFace Team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License. -->
+
+# Chroma
+
+<div class="flex flex-wrap space-x-1">
+  <img alt="LoRA" src="https://img.shields.io/badge/LoRA-d8b4fe?style=flat"/>
+  <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&logo=apple&logoColor=white"/>
+</div>
+
+Chroma is a text-to-image generation model based on Flux.
+
+Original model checkpoints for Chroma can be found [here](https://huggingface.co/lodestones/Chroma).
+
+<Tip>
+
+Chroma can use all the same optimizations as Flux.
+
+</Tip>
+
+### Inference
+
+```python
+import torch
+from diffusers import ChromaPipeline
+
+pipe = ChromaPipeline.from_pretrained("chroma-diffusers-repo", torch_dtype=torch.bfloat16)
+pipe.enable_model_cpu_offload()
+
+prompt = "A cat holding a sign that says hello world"
+out = pipe(
+    prompt=prompt,
+    guidance_scale=4.0,
+    height=1024,
+    width=1024,
+    num_inference_steps=26,
+).images[0]
+out.save("image.png")
+```
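+
+Unlike guidance-distilled Flux checkpoints, Chroma applies real classifier-free guidance, so `guidance_scale` behaves like classic CFG and a `negative_prompt` can be passed as well. A minimal sketch reusing `pipe` and `prompt` from the example above (the negative prompt text is only an illustration):
+
+```python
+image = pipe(
+    prompt=prompt,
+    negative_prompt="low quality, blurry",  # illustrative; tune for your use case
+    guidance_scale=4.0,
+    num_inference_steps=26,
+).images[0]
+image.save("image_cfg.png")
+```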
+
+## Single File Loading for the `ChromaTransformer2DModel`
+
+The `ChromaTransformer2DModel` supports loading checkpoints in the original format, which is also useful for loading finetunes or quantized versions of the model published by the community.
+
+The following example demonstrates how to run Chroma from a single file:
+
+```python
+import torch
+from diffusers import ChromaTransformer2DModel, ChromaPipeline
+from transformers import T5EncoderModel, T5Tokenizer
+
+bfl_repo = "black-forest-labs/FLUX.1-dev"
+dtype = torch.bfloat16
+
+transformer = ChromaTransformer2DModel.from_single_file(
+    "https://huggingface.co/lodestones/Chroma/blob/main/chroma-unlocked-v35.safetensors", torch_dtype=dtype
+)
+
+text_encoder = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype)
+tokenizer = T5Tokenizer.from_pretrained(bfl_repo, subfolder="tokenizer_2")
+
+pipe = ChromaPipeline.from_pretrained(
+    bfl_repo, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, torch_dtype=dtype
+)
+
+pipe.enable_model_cpu_offload()
+
+prompt = "A cat holding a sign that says hello world"
+image = pipe(
+    prompt,
+    guidance_scale=4.0,
+    output_type="pil",
+    num_inference_steps=26,
+    generator=torch.Generator("cpu").manual_seed(0),
+).images[0]
+
+image.save("image.png")
+```
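+
+## Memory optimizations
+
+Because Chroma shares the Flux architecture, Flux memory-saving techniques generally carry over. The snippet below is a minimal sketch that quantizes the transformer to 4-bit with bitsandbytes before assembling the pipeline; the `chroma-diffusers-repo` id is a placeholder for a diffusers-format Chroma checkpoint, and exact savings depend on your hardware:
+
+```python
+import torch
+from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
+from diffusers import ChromaPipeline, ChromaTransformer2DModel
+
+dtype = torch.bfloat16
+repo = "chroma-diffusers-repo"  # placeholder repo id
+
+quant_config = DiffusersBitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=dtype)
+transformer = ChromaTransformer2DModel.from_pretrained(
+    repo, subfolder="transformer", quantization_config=quant_config, torch_dtype=dtype
+)
+
+pipe = ChromaPipeline.from_pretrained(repo, transformer=transformer, torch_dtype=dtype)
+pipe.enable_model_cpu_offload()
+
+prompt = "A cat holding a sign that says hello world"
+image = pipe(prompt, guidance_scale=4.0, num_inference_steps=26).images[0]
+image.save("image.png")
+```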
+
+## ChromaPipeline
+
+[[autodoc]] ChromaPipeline
+  - all
+  - __call__

From 0c5eb4470164b30118644d6dbffb427b7fde2c33 Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Thu, 12 Jun 2025 00:46:41 -0600
Subject: [PATCH 59/89] undo don't change dtype

---
 src/diffusers/models/embeddings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py
index 0708f93299ab..641944d67f0d 100644
--- a/src/diffusers/models/embeddings.py
+++ b/src/diffusers/models/embeddings.py
@@ -1658,7 +1658,7 @@ def forward(self, timestep: torch.Tensor) -> torch.Tensor:
             torch.cat([timesteps_proj, guidance_proj], dim=1).unsqueeze(1).repeat(1, mod_index_length, 1)
         )
         input_vec = torch.cat([timestep_guidance, mod_proj.unsqueeze(0)], dim=-1)
-        return input_vec
+        return input_vec.to(timestep.dtype)
 
 class CogView3CombinedTimestepSizeEmbeddings(nn.Module):
     def __init__(self, embedding_dim: int, condition_dim: int, pooled_projection_dim: int, timesteps_dim: int = 256):

From 42c0e8ecbebd3717b5cd7978fd2eb1ba30e84561 Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Thu, 12 Jun 2025 00:50:36 -0600
Subject: [PATCH 60/89] undo arxiv change, unsure why that happened

---
 src/diffusers/models/embeddings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/models/embeddings.py b/src/diffusers/models/embeddings.py
index 641944d67f0d..1a43994c1116 100644
--- a/src/diffusers/models/embeddings.py
+++ b/src/diffusers/models/embeddings.py
@@ -1399,7 +1399,7 @@ class ImagePositionalEmbeddings(nn.Module):
     Converts latent image classes into vector embeddings. Sums the vector embeddings with positional embeddings for
     the height and width of the latent space.
 
-    For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092
+    For more details, see figure 10 of the dall-e paper: https://huggingface.co/papers/2102.12092
 
     For VQ-diffusion:
 

From da846d1fff09c4d4e1a1125e5d5b10d655b07469 Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Thu, 12 Jun 2025 00:53:40 -0600
Subject: [PATCH 61/89] fix hf papers regression in more places

---
 src/diffusers/models/normalization.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/diffusers/models/normalization.py b/src/diffusers/models/normalization.py
index f2b71bb6888e..b07ed2ca893c 100644
--- a/src/diffusers/models/normalization.py
+++ b/src/diffusers/models/normalization.py
@@ -306,7 +306,7 @@ class AdaLayerNormSingle(nn.Module):
     r"""
     Norm layer adaptive layer norm single (adaLN-single).
 
-    As proposed in PixArt-Alpha (see: https://arxiv.org/abs/2310.00426; Section 2.3).
+    As proposed in PixArt-Alpha (see: https://huggingface.co/papers/2310.00426; Section 2.3).
 
     Parameters:
         embedding_dim (`int`): The size of each embedding vector.
@@ -623,7 +623,7 @@ def forward(self, input):
 
 class RMSNorm(nn.Module):
     r"""
-    RMS Norm as introduced in https://arxiv.org/abs/1910.07467 by Zhang et al.
+    RMS Norm as introduced in https://huggingface.co/papers/1910.07467 by Zhang et al.
 
     Args:
         dim (`int`): Number of dimensions to use for `weights`. Only effective when `elementwise_affine` is True.
@@ -713,7 +713,7 @@ def forward(self, hidden_states):
 
 class GlobalResponseNorm(nn.Module):
     r"""
-    Global response normalization as introduced in ConvNeXt-v2 (https://arxiv.org/abs/2301.00808).
+    Global response normalization as introduced in ConvNeXt-v2 (https://huggingface.co/papers/2301.00808).
 
     Args:
         dim (`int`): Number of dimensions to use for the `gamma` and `beta`.

From 18327cb57cad4e1e0916fc2c7e50bf41bd7e5ea5 Mon Sep 17 00:00:00 2001
From: Edna <88869424+Ednaordinary@users.noreply.github.com>
Date: Thu, 12 Jun 2025 02:52:39 -0600
Subject: [PATCH 62/89] Update docs/source/en/api/pipelines/chroma.md

Co-authored-by: Dhruv Nair
---
 docs/source/en/api/pipelines/chroma.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/en/api/pipelines/chroma.md b/docs/source/en/api/pipelines/chroma.md
index d11bcfabdc99..b4d718244fc7 100644
--- a/docs/source/en/api/pipelines/chroma.md
+++ b/docs/source/en/api/pipelines/chroma.md
@@ -1,4 +1,4 @@
-