Skip to content

Commit 64eaf85

Browse files
committed
up
1 parent 6ac18d0 commit 64eaf85

8 files changed

Lines changed: 18 additions & 33 deletions

examples/community/llm_grounded_diffusion.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1534,30 +1534,30 @@ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32
15341534
return emb
15351535

15361536
@property
1537-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
1537+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.guidance_scale
15381538
def guidance_scale(self):
15391539
return self._guidance_scale
15401540

15411541
@property
1542-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale
1542+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.guidance_rescale
15431543
def guidance_rescale(self):
15441544
return self._guidance_rescale
15451545

15461546
@property
1547-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
1547+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.clip_skip
15481548
def clip_skip(self):
15491549
return self._clip_skip
15501550

15511551
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
15521552
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
15531553
# corresponds to doing no classifier free guidance.
15541554
@property
1555-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
1555+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.do_classifier_free_guidance
15561556
def do_classifier_free_guidance(self):
15571557
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
15581558

15591559
@property
1560-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
1560+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.cross_attention_kwargs
15611561
def cross_attention_kwargs(self):
15621562
return self._cross_attention_kwargs
15631563

src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -616,22 +616,22 @@ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype
616616
return latents
617617

618618
@property
619-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
619+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.guidance_scale
620620
def guidance_scale(self):
621621
return self._guidance_scale
622622

623623
@property
624-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
624+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.clip_skip
625625
def clip_skip(self):
626626
return self._clip_skip
627627

628628
@property
629-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
629+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.do_classifier_free_guidance
630630
def do_classifier_free_guidance(self):
631631
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
632632

633633
@property
634-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
634+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.cross_attention_kwargs
635635
def cross_attention_kwargs(self):
636636
return self._cross_attention_kwargs
637637

src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -702,22 +702,22 @@ def upcast_vae(self):
702702
self.vae.decoder.mid_block.to(dtype)
703703

704704
@property
705-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
705+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.guidance_scale
706706
def guidance_scale(self):
707707
return self._guidance_scale
708708

709709
@property
710-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
710+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.clip_skip
711711
def clip_skip(self):
712712
return self._clip_skip
713713

714714
@property
715-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
715+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.do_classifier_free_guidance
716716
def do_classifier_free_guidance(self):
717717
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
718718

719719
@property
720-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
720+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_utils.StableDiffusionMixin.cross_attention_kwargs
721721
def cross_attention_kwargs(self):
722722
return self._cross_attention_kwargs
723723

src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -940,10 +940,7 @@ def upcast_vae(self):
940940
self.vae.to(dtype=torch.float32)
941941
use_torch_2_0_or_xformers = isinstance(
942942
self.vae.decoder.mid_block.attentions[0].processor,
943-
(
944-
AttnProcessor2_0,
945-
XFormersAttnProcessor,
946-
),
943+
(AttnProcessor2_0, XFormersAttnProcessor),
947944
)
948945
# if xformers or torch_2_0 is used attention block does not need
949946
# to be in float32 which can save lots of memory

src/diffusers/pipelines/pag/pipeline_pag_controlnet_sd_xl_img2img.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1053,10 +1053,7 @@ def upcast_vae(self):
10531053
self.vae.to(dtype=torch.float32)
10541054
use_torch_2_0_or_xformers = isinstance(
10551055
self.vae.decoder.mid_block.attentions[0].processor,
1056-
(
1057-
AttnProcessor2_0,
1058-
XFormersAttnProcessor,
1059-
),
1056+
(AttnProcessor2_0, XFormersAttnProcessor),
10601057
)
10611058
# if xformers or torch_2_0 is used attention block does not need
10621059
# to be in float32 which can save lots of memory

src/diffusers/pipelines/pag/pipeline_pag_sd_xl_img2img.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -914,10 +914,7 @@ def upcast_vae(self):
914914
self.vae.to(dtype=torch.float32)
915915
use_torch_2_0_or_xformers = isinstance(
916916
self.vae.decoder.mid_block.attentions[0].processor,
917-
(
918-
AttnProcessor2_0,
919-
XFormersAttnProcessor,
920-
),
917+
(AttnProcessor2_0, XFormersAttnProcessor),
921918
)
922919
# if xformers or torch_2_0 is used attention block does not need
923920
# to be in float32 which can save lots of memory

src/diffusers/pipelines/pag/pipeline_pag_sd_xl_inpaint.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1005,10 +1005,7 @@ def upcast_vae(self):
10051005
self.vae.to(dtype=torch.float32)
10061006
use_torch_2_0_or_xformers = isinstance(
10071007
self.vae.decoder.mid_block.attentions[0].processor,
1008-
(
1009-
AttnProcessor2_0,
1010-
XFormersAttnProcessor,
1011-
),
1008+
(AttnProcessor2_0, XFormersAttnProcessor),
10121009
)
10131010
# if xformers or torch_2_0 is used attention block does not need
10141011
# to be in float32 which can save lots of memory

src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -783,10 +783,7 @@ def upcast_vae(self):
783783
self.vae.to(dtype=torch.float32)
784784
use_torch_2_0_or_xformers = isinstance(
785785
self.vae.decoder.mid_block.attentions[0].processor,
786-
(
787-
AttnProcessor2_0,
788-
XFormersAttnProcessor,
789-
),
786+
(AttnProcessor2_0, XFormersAttnProcessor),
790787
)
791788
# if xformers or torch_2_0 is used attention block does not need
792789
# to be in float32 which can save lots of memory

0 commit comments

Comments
 (0)