From 4441337223cc2e14e5272f91fcd2a22bda0a5a9a Mon Sep 17 00:00:00 2001
From: stevhliu
Date: Tue, 17 Feb 2026 11:58:09 -0800
Subject: [PATCH 1/8] deprecate

---
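Note on this commit: it relocates the deprecated pipelines from src/diffusers/pipelines/<name>/ to src/diffusers/pipelines/deprecated/<name>/, bumps their relative imports one level (from ...module to ....module) to match the deeper location, and re-exports every moved class through the "deprecated" entry of the lazy _import_structure in src/diffusers/pipelines/__init__.py. The sketch below shows what that re-export means for callers; it assumes the lazy import table in the diff resolves attributes the same way it does in current diffusers releases:

    # Both import paths resolve to the same class after the move; only the
    # location of the module on disk changes.
    from diffusers.pipelines import AmusedPipeline
    from diffusers.pipelines.deprecated import AmusedPipeline as MovedAmusedPipeline

    assert AmusedPipeline is MovedAmusedPipeline
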
 docs/source/en/_toctree.yml | 47 -----
 src/diffusers/pipelines/__init__.py | 161 +++++++++---------
 .../pipeline_controlnet_blip_diffusion.py | 6 +-
 .../pipelines/deprecated/__init__.py | 67 ++++++++
 .../{ => deprecated}/amused/__init__.py | 6 +-
 .../amused/pipeline_amused.py | 10 +-
 .../amused/pipeline_amused_img2img.py | 10 +-
 .../amused/pipeline_amused_inpaint.py | 10 +-
 .../{ => deprecated}/audioldm/__init__.py | 6 +-
 .../audioldm/pipeline_audioldm.py | 10 +-
 .../blip_diffusion/__init__.py | 4 +-
 .../blip_diffusion/blip_image_processing.py | 0
 .../blip_diffusion/modeling_blip2.py | 0
 .../blip_diffusion/modeling_ctx_clip.py | 0
 .../blip_diffusion/pipeline_blip_diffusion.py | 10 +-
 .../controlnet_xs/__init__.py | 136 +++++++--------
 .../controlnet_xs/pipeline_controlnet_xs.py | 22 +--
 .../pipeline_controlnet_xs_sd_xl.py | 24 +--
 .../dance_diffusion/__init__.py | 2 +-
 .../pipeline_dance_diffusion.py | 10 +-
 .../{ => deprecated}/i2vgen_xl/__init__.py | 6 +-
 .../i2vgen_xl/pipeline_i2vgen_xl.py | 16 +-
 .../{ => deprecated}/musicldm/__init__.py | 6 +-
 .../musicldm/pipeline_musicldm.py | 12 +-
 .../paint_by_example/__init__.py | 6 +-
 .../paint_by_example/image_encoder.py | 4 +-
 .../pipeline_paint_by_example.py | 16 +-
 .../{ => deprecated}/pia/__init__.py | 6 +-
 .../{ => deprecated}/pia/pipeline_pia.py | 22 +--
 .../semantic_stable_diffusion/__init__.py | 6 +-
 .../pipeline_output.py | 2 +-
 .../pipeline_semantic_stable_diffusion.py | 14 +-
 .../__init__.py | 6 +-
 ...line_stable_diffusion_attend_and_excite.py | 22 +--
 .../stable_diffusion_diffedit/__init__.py | 6 +-
 .../pipeline_stable_diffusion_diffedit.py | 22 +--
 .../stable_diffusion_gligen/__init__.py | 6 +-
 .../pipeline_stable_diffusion_gligen.py | 22 +--
 ...line_stable_diffusion_gligen_text_image.py | 24 +--
 .../stable_diffusion_ldm3d/__init__.py | 6 +-
 .../pipeline_stable_diffusion_ldm3d.py | 18 +-
 .../stable_diffusion_panorama/__init__.py | 6 +-
 .../pipeline_stable_diffusion_panorama.py | 20 +--
 .../stable_diffusion_safe/__init__.py | 6 +-
 .../stable_diffusion_safe/pipeline_output.py | 2 +-
 .../pipeline_stable_diffusion_safe.py | 16 +-
 .../stable_diffusion_safe/safety_checker.py | 2 +-
 .../stable_diffusion_sag/__init__.py | 6 +-
 .../pipeline_stable_diffusion_sag.py | 20 +--
 .../text_to_video_synthesis/__init__.py | 6 +-
 .../pipeline_output.py | 2 +-
 .../pipeline_text_to_video_synth.py | 16 +-
 .../pipeline_text_to_video_synth_img2img.py | 16 +-
 .../pipeline_text_to_video_zero.py | 18 +-
 .../pipeline_text_to_video_zero_sdxl.py | 20 +--
 .../{ => deprecated}/unclip/__init__.py | 6 +-
 .../unclip/pipeline_unclip.py | 10 +-
 .../unclip/pipeline_unclip_image_variation.py | 10 +-
 .../{ => deprecated}/unclip/text_proj.py | 4 +-
 .../{ => deprecated}/unidiffuser/__init__.py | 6 +-
 .../unidiffuser/modeling_text_decoder.py | 4 +-
 .../unidiffuser/modeling_uvit.py | 16 +-
 .../unidiffuser/pipeline_unidiffuser.py | 18 +-
 .../{ => deprecated}/wuerstchen/__init__.py | 6 +-
 .../wuerstchen/modeling_paella_vq_model.py | 10 +-
 .../wuerstchen/modeling_wuerstchen_common.py | 2 +-
 .../modeling_wuerstchen_diffnext.py | 4 +-
 .../wuerstchen/modeling_wuerstchen_prior.py | 10 +-
 .../wuerstchen/pipeline_wuerstchen.py | 8 +-
 .../pipeline_wuerstchen_combined.py | 6 +-
 .../wuerstchen/pipeline_wuerstchen_prior.py | 10 +-
 .../stable_cascade/pipeline_stable_cascade.py | 2 +-
 .../pipeline_stable_cascade_combined.py | 2 +-
 .../stable_diffusion/convert_from_ckpt.py | 2 +-
 74 files changed, 546 insertions(+), 535 deletions(-)
 rename src/diffusers/pipelines/{ => deprecated}/amused/__init__.py (91%)
 rename src/diffusers/pipelines/{ => deprecated}/amused/pipeline_amused.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/amused/pipeline_amused_img2img.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/amused/pipeline_amused_inpaint.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/audioldm/__init__.py (88%)
 rename src/diffusers/pipelines/{ => deprecated}/audioldm/pipeline_audioldm.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/blip_diffusion/__init__.py (73%)
 rename src/diffusers/pipelines/{ => deprecated}/blip_diffusion/blip_image_processing.py (100%)
 rename src/diffusers/pipelines/{ => deprecated}/blip_diffusion/modeling_blip2.py (100%)
 rename src/diffusers/pipelines/{ => deprecated}/blip_diffusion/modeling_ctx_clip.py (100%)
 rename src/diffusers/pipelines/{ => deprecated}/blip_diffusion/pipeline_blip_diffusion.py (97%)
 rename src/diffusers/pipelines/{ => deprecated}/controlnet_xs/__init__.py (84%)
 rename src/diffusers/pipelines/{ => deprecated}/controlnet_xs/pipeline_controlnet_xs.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/controlnet_xs/pipeline_controlnet_xs_sd_xl.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/dance_diffusion/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/dance_diffusion/pipeline_dance_diffusion.py (95%)
 rename src/diffusers/pipelines/{ => deprecated}/i2vgen_xl/__init__.py (86%)
 rename src/diffusers/pipelines/{ => deprecated}/i2vgen_xl/pipeline_i2vgen_xl.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/musicldm/__init__.py (88%)
 rename src/diffusers/pipelines/{ => deprecated}/musicldm/pipeline_musicldm.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/paint_by_example/__init__.py (89%)
 rename src/diffusers/pipelines/{ => deprecated}/paint_by_example/image_encoder.py (96%)
 rename src/diffusers/pipelines/{ => deprecated}/paint_by_example/pipeline_paint_by_example.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/pia/__init__.py (88%)
 rename src/diffusers/pipelines/{ => deprecated}/pia/pipeline_pia.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/semantic_stable_diffusion/__init__.py (88%)
 rename src/diffusers/pipelines/{ => deprecated}/semantic_stable_diffusion/pipeline_output.py (95%)
 rename src/diffusers/pipelines/{ => deprecated}/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_attend_and_excite/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_diffedit/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py (99%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_gligen/__init__.py (89%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py (98%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_ldm3d/__init__.py (87%)
 rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py (98%)
 rename src/diffusers/pipelines/{ =>
deprecated}/stable_diffusion_panorama/__init__.py (87%) rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py (98%) rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_safe/__init__.py (94%) rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_safe/pipeline_output.py (98%) rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_safe/pipeline_stable_diffusion_safe.py (98%) rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_safe/safety_checker.py (99%) rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_sag/__init__.py (87%) rename src/diffusers/pipelines/{ => deprecated}/stable_diffusion_sag/pipeline_stable_diffusion_sag.py (98%) rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/__init__.py (90%) rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_output.py (96%) rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_text_to_video_synth.py (98%) rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py (98%) rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_text_to_video_zero.py (98%) rename src/diffusers/pipelines/{ => deprecated}/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py (99%) rename src/diffusers/pipelines/{ => deprecated}/unclip/__init__.py (87%) rename src/diffusers/pipelines/{ => deprecated}/unclip/pipeline_unclip.py (98%) rename src/diffusers/pipelines/{ => deprecated}/unclip/pipeline_unclip_image_variation.py (98%) rename src/diffusers/pipelines/{ => deprecated}/unclip/text_proj.py (97%) rename src/diffusers/pipelines/{ => deprecated}/unidiffuser/__init__.py (91%) rename src/diffusers/pipelines/{ => deprecated}/unidiffuser/modeling_text_decoder.py (99%) rename src/diffusers/pipelines/{ => deprecated}/unidiffuser/modeling_uvit.py (99%) rename src/diffusers/pipelines/{ => deprecated}/unidiffuser/pipeline_unidiffuser.py (99%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/__init__.py (91%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/modeling_paella_vq_model.py (95%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/modeling_wuerstchen_common.py (98%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/modeling_wuerstchen_diffnext.py (98%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/modeling_wuerstchen_prior.py (93%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/pipeline_wuerstchen.py (98%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/pipeline_wuerstchen_combined.py (98%) rename src/diffusers/pipelines/{ => deprecated}/wuerstchen/pipeline_wuerstchen_prior.py (98%) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 098660ec3f39..5c67403cd56d 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -472,28 +472,16 @@ - local: api/pipelines/auto_pipeline title: AutoPipeline - sections: - - local: api/pipelines/audioldm - title: AudioLDM - local: api/pipelines/audioldm2 title: AudioLDM 2 - - local: api/pipelines/dance_diffusion - title: Dance Diffusion - - local: api/pipelines/musicldm - title: MusicLDM - local: api/pipelines/stable_audio title: Stable Audio title: Audio - sections: - - local: api/pipelines/amused - title: aMUSEd - local: api/pipelines/animatediff title: AnimateDiff - - local: api/pipelines/attend_and_excite - title: Attend-and-Excite - local: 
api/pipelines/aura_flow title: AuraFlow - - local: api/pipelines/blip_diffusion - title: BLIP-Diffusion - local: api/pipelines/bria_3_2 title: Bria 3.2 - local: api/pipelines/bria_fibo @@ -520,10 +508,6 @@ title: ControlNet with Stable Diffusion XL - local: api/pipelines/controlnet_sana title: ControlNet-Sana - - local: api/pipelines/controlnetxs - title: ControlNet-XS - - local: api/pipelines/controlnetxs_sdxl - title: ControlNet-XS with Stable Diffusion XL - local: api/pipelines/controlnet_union title: ControlNetUnion - local: api/pipelines/cosmos @@ -534,8 +518,6 @@ title: DDPM - local: api/pipelines/deepfloyd_if title: DeepFloyd IF - - local: api/pipelines/diffedit - title: DiffEdit - local: api/pipelines/dit title: DiT - local: api/pipelines/easyanimate @@ -580,16 +562,12 @@ title: Lumina-T2X - local: api/pipelines/marigold title: Marigold - - local: api/pipelines/panorama - title: MultiDiffusion - local: api/pipelines/omnigen title: OmniGen - local: api/pipelines/ovis_image title: Ovis-Image - local: api/pipelines/pag title: PAG - - local: api/pipelines/paint_by_example - title: Paint by Example - local: api/pipelines/pixart title: PixArt-α - local: api/pipelines/pixart_sigma @@ -604,10 +582,6 @@ title: Sana Sprint - local: api/pipelines/sana_video title: Sana Video - - local: api/pipelines/self_attention_guidance - title: Self-Attention Guidance - - local: api/pipelines/semantic_stable_diffusion - title: Semantic Guidance - local: api/pipelines/shap_e title: Shap-E - local: api/pipelines/stable_cascade @@ -617,8 +591,6 @@ title: Overview - local: api/pipelines/stable_diffusion/depth2img title: Depth-to-image - - local: api/pipelines/stable_diffusion/gligen - title: GLIGEN (Grounded Language-to-Image Generation) - local: api/pipelines/stable_diffusion/image_variation title: Image variation - local: api/pipelines/stable_diffusion/img2img @@ -628,11 +600,6 @@ - local: api/pipelines/stable_diffusion/latent_upscale title: Latent upscaler - - local: api/pipelines/stable_diffusion/ldm3d_diffusion - title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D - Upscaler - - local: api/pipelines/stable_diffusion/stable_diffusion_safe - title: Safe Stable Diffusion - local: api/pipelines/stable_diffusion/sdxl_turbo title: SDXL Turbo - local: api/pipelines/stable_diffusion/stable_diffusion_2 @@ -650,16 +617,10 @@ title: Stable Diffusion - local: api/pipelines/stable_unclip title: Stable unCLIP - - local: api/pipelines/unclip - title: unCLIP - - local: api/pipelines/unidiffuser - title: UniDiffuser - local: api/pipelines/value_guided_sampling title: Value-guided sampling - local: api/pipelines/visualcloze title: VisualCloze - - local: api/pipelines/wuerstchen - title: Wuerstchen - local: api/pipelines/z_image title: Z-Image title: Image @@ -678,8 +639,6 @@ title: HunyuanVideo - local: api/pipelines/hunyuan_video15 title: HunyuanVideo1.5 - - local: api/pipelines/i2vgenxl - title: I2VGen-XL - local: api/pipelines/kandinsky5_video title: Kandinsky 5.0 Video - local: api/pipelines/latte @@ -690,16 +649,10 @@ title: LTXVideo - local: api/pipelines/mochi title: Mochi - - local: api/pipelines/pia - title: Personalized Image Animator (PIA) - local: api/pipelines/skyreels_v2 title: SkyReels-V2 - local: api/pipelines/stable_diffusion/svd title: Stable Video Diffusion - - local: api/pipelines/text_to_video - title: Text-to-video - - local: api/pipelines/text_to_video_zero - title: Text2Video-Zero - local: api/pipelines/wan title: Wan title: Video diff --git 
a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 638598051d64..1011e7f5d05a 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -24,7 +24,6 @@ "controlnet": [], "controlnet_hunyuandit": [], "controlnet_sd3": [], - "controlnet_xs": [], "deprecated": [], "latent_diffusion": [], "ledits_pp": [], @@ -48,7 +47,6 @@ "AutoPipelineForText2Image", ] _import_structure["consistency_models"] = ["ConsistencyModelPipeline"] - _import_structure["dance_diffusion"] = ["DanceDiffusionPipeline"] _import_structure["ddim"] = ["DDIMPipeline"] _import_structure["ddpm"] = ["DDPMPipeline"] _import_structure["dit"] = ["DiTPipeline"] @@ -61,6 +59,7 @@ ] _import_structure["deprecated"].extend( [ + "DanceDiffusionPipeline", "PNDMPipeline", "LDMPipeline", "RePaintPipeline", @@ -103,6 +102,35 @@ else: _import_structure["deprecated"].extend( [ + "AmusedImg2ImgPipeline", + "AmusedInpaintPipeline", + "AmusedPipeline", + "AudioLDMPipeline", + "BlipDiffusionPipeline", + "I2VGenXLPipeline", + "ImageTextPipelineOutput", + "MusicLDMPipeline", + "PIAPipeline", + "PaintByExamplePipeline", + "SemanticStableDiffusionPipeline", + "StableDiffusionAttendAndExcitePipeline", + "StableDiffusionControlNetXSPipeline", + "StableDiffusionDiffEditPipeline", + "StableDiffusionGLIGENPipeline", + "StableDiffusionGLIGENTextImagePipeline", + "StableDiffusionLDM3DPipeline", + "StableDiffusionPanoramaPipeline", + "StableDiffusionPipelineSafe", + "StableDiffusionSAGPipeline", + "StableDiffusionXLControlNetXSPipeline", + "TextToVideoSDPipeline", + "TextToVideoZeroPipeline", + "TextToVideoZeroSDXLPipeline", + "UnCLIPImageVariationPipeline", + "UnCLIPPipeline", + "UniDiffuserModel", + "UniDiffuserPipeline", + "UniDiffuserTextDecoder", "VQDiffusionPipeline", "AltDiffusionPipeline", "AltDiffusionImg2ImgPipeline", @@ -115,10 +143,13 @@ "VersatileDiffusionImageVariationPipeline", "VersatileDiffusionPipeline", "VersatileDiffusionTextToImagePipeline", + "VideoToVideoSDPipeline", + "WuerstchenCombinedPipeline", + "WuerstchenDecoderPipeline", + "WuerstchenPriorPipeline", ] ) _import_structure["allegro"] = ["AllegroPipeline"] - _import_structure["amused"] = ["AmusedImg2ImgPipeline", "AmusedInpaintPipeline", "AmusedPipeline"] _import_structure["animatediff"] = [ "AnimateDiffPipeline", "AnimateDiffControlNetPipeline", @@ -147,13 +178,11 @@ "FluxKontextInpaintPipeline", ] _import_structure["prx"] = ["PRXPipeline"] - _import_structure["audioldm"] = ["AudioLDMPipeline"] _import_structure["audioldm2"] = [ "AudioLDM2Pipeline", "AudioLDM2ProjectionModel", "AudioLDM2UNet2DConditionModel", ] - _import_structure["blip_diffusion"] = ["BlipDiffusionPipeline"] _import_structure["chroma"] = ["ChromaPipeline", "ChromaImg2ImgPipeline", "ChromaInpaintPipeline"] _import_structure["cogvideo"] = [ "CogVideoXPipeline", @@ -207,12 +236,6 @@ "SanaPAGPipeline", ] ) - _import_structure["controlnet_xs"].extend( - [ - "StableDiffusionControlNetXSPipeline", - "StableDiffusionXLControlNetXSPipeline", - ] - ) _import_structure["controlnet_hunyuandit"].extend( [ "HunyuanDiTControlNetPipeline", @@ -304,12 +327,9 @@ ] ) _import_structure["mochi"] = ["MochiPipeline"] - _import_structure["musicldm"] = ["MusicLDMPipeline"] _import_structure["omnigen"] = ["OmniGenPipeline"] _import_structure["ovis_image"] = ["OvisImagePipeline"] _import_structure["visualcloze"] = ["VisualClozePipeline", "VisualClozeGenerationPipeline"] - _import_structure["paint_by_example"] = ["PaintByExamplePipeline"] - _import_structure["pia"] = 
["PIAPipeline"] _import_structure["pixart_alpha"] = ["PixArtAlphaPipeline", "PixArtSigmaPipeline"] _import_structure["sana"] = [ "SanaPipeline", @@ -321,7 +341,6 @@ "SanaVideoPipeline", "SanaImageToVideoPipeline", ] - _import_structure["semantic_stable_diffusion"] = ["SemanticStableDiffusionPipeline"] _import_structure["shap_e"] = ["ShapEImg2ImgPipeline", "ShapEPipeline"] _import_structure["stable_audio"] = [ "StableAudioProjectionModel", @@ -345,7 +364,6 @@ "StableDiffusionUpscalePipeline", "StableUnCLIPImg2ImgPipeline", "StableUnCLIPPipeline", - "StableDiffusionLDM3DPipeline", ] ) _import_structure["aura_flow"] = ["AuraFlowPipeline"] @@ -354,13 +372,6 @@ "StableDiffusion3Img2ImgPipeline", "StableDiffusion3InpaintPipeline", ] - _import_structure["stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] - _import_structure["stable_diffusion_safe"] = ["StableDiffusionPipelineSafe"] - _import_structure["stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"] - _import_structure["stable_diffusion_gligen"] = [ - "StableDiffusionGLIGENPipeline", - "StableDiffusionGLIGENTextImagePipeline", - ] _import_structure["stable_video_diffusion"] = ["StableVideoDiffusionPipeline"] _import_structure["stable_diffusion_xl"].extend( [ @@ -370,32 +381,10 @@ "StableDiffusionXLPipeline", ] ) - _import_structure["stable_diffusion_diffedit"] = ["StableDiffusionDiffEditPipeline"] - _import_structure["stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"] - _import_structure["stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"] _import_structure["t2i_adapter"] = [ "StableDiffusionAdapterPipeline", "StableDiffusionXLAdapterPipeline", ] - _import_structure["text_to_video_synthesis"] = [ - "TextToVideoSDPipeline", - "TextToVideoZeroPipeline", - "TextToVideoZeroSDXLPipeline", - "VideoToVideoSDPipeline", - ] - _import_structure["i2vgen_xl"] = ["I2VGenXLPipeline"] - _import_structure["unclip"] = ["UnCLIPImageVariationPipeline", "UnCLIPPipeline"] - _import_structure["unidiffuser"] = [ - "ImageTextPipelineOutput", - "UniDiffuserModel", - "UniDiffuserPipeline", - "UniDiffuserTextDecoder", - ] - _import_structure["wuerstchen"] = [ - "WuerstchenCombinedPipeline", - "WuerstchenDecoderPipeline", - "WuerstchenPriorPipeline", - ] _import_structure["wan"] = [ "WanPipeline", "WanImageToVideoPipeline", @@ -537,10 +526,16 @@ AutoPipelineForText2Image, ) from .consistency_models import ConsistencyModelPipeline - from .dance_diffusion import DanceDiffusionPipeline from .ddim import DDIMPipeline from .ddpm import DDPMPipeline - from .deprecated import KarrasVePipeline, LDMPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline + from .deprecated import ( + DanceDiffusionPipeline, + KarrasVePipeline, + LDMPipeline, + PNDMPipeline, + RePaintPipeline, + ScoreSdeVePipeline, + ) from .dit import DiTPipeline from .latent_diffusion import LDMSuperResolutionPipeline from .pipeline_utils import ( @@ -565,7 +560,6 @@ from ..utils.dummy_torch_and_transformers_objects import * else: from .allegro import AllegroPipeline - from .amused import AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline from .animatediff import ( AnimateDiffControlNetPipeline, AnimateDiffPipeline, @@ -574,14 +568,12 @@ AnimateDiffVideoToVideoControlNetPipeline, AnimateDiffVideoToVideoPipeline, ) - from .audioldm import AudioLDMPipeline from .audioldm2 import ( AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel, ) from .aura_flow import AuraFlowPipeline - from .blip_diffusion import 
BlipDiffusionPipeline from .bria import BriaPipeline from .bria_fibo import BriaFiboEditPipeline, BriaFiboPipeline from .chroma import ChromaImg2ImgPipeline, ChromaInpaintPipeline, ChromaPipeline @@ -610,10 +602,6 @@ HunyuanDiTControlNetPipeline, ) from .controlnet_sd3 import StableDiffusion3ControlNetInpaintingPipeline, StableDiffusion3ControlNetPipeline - from .controlnet_xs import ( - StableDiffusionControlNetXSPipeline, - StableDiffusionXLControlNetXSPipeline, - ) from .cosmos import ( Cosmos2_5_PredictBasePipeline, Cosmos2_5_TransferPipeline, @@ -633,16 +621,49 @@ from .deprecated import ( AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, + AmusedImg2ImgPipeline, + AmusedInpaintPipeline, + AmusedPipeline, + AudioLDMPipeline, + BlipDiffusionPipeline, CycleDiffusionPipeline, + I2VGenXLPipeline, + ImageTextPipelineOutput, + MusicLDMPipeline, + PIAPipeline, + PaintByExamplePipeline, + SemanticStableDiffusionPipeline, + StableDiffusionAttendAndExcitePipeline, + StableDiffusionControlNetXSPipeline, + StableDiffusionDiffEditPipeline, + StableDiffusionGLIGENPipeline, + StableDiffusionGLIGENTextImagePipeline, StableDiffusionInpaintPipelineLegacy, + StableDiffusionLDM3DPipeline, StableDiffusionModelEditingPipeline, + StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, + StableDiffusionPipelineSafe, StableDiffusionPix2PixZeroPipeline, + StableDiffusionSAGPipeline, + StableDiffusionXLControlNetXSPipeline, + TextToVideoSDPipeline, + TextToVideoZeroPipeline, + TextToVideoZeroSDXLPipeline, + UnCLIPImageVariationPipeline, + UnCLIPPipeline, + UniDiffuserModel, + UniDiffuserPipeline, + UniDiffuserTextDecoder, + VQDiffusionPipeline, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, - VQDiffusionPipeline, + VideoToVideoSDPipeline, + WuerstchenCombinedPipeline, + WuerstchenDecoderPipeline, + WuerstchenPriorPipeline, ) from .easyanimate import ( EasyAnimateControlPipeline, @@ -677,7 +698,6 @@ ) from .hunyuan_video1_5 import HunyuanVideo15ImageToVideoPipeline, HunyuanVideo15Pipeline from .hunyuandit import HunyuanDiTPipeline - from .i2vgen_xl import I2VGenXLPipeline from .kandinsky import ( KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, @@ -739,7 +759,6 @@ MarigoldNormalsPipeline, ) from .mochi import MochiPipeline - from .musicldm import MusicLDMPipeline from .omnigen import OmniGenPipeline from .ovis_image import OvisImagePipeline from .pag import ( @@ -761,8 +780,6 @@ StableDiffusionXLPAGInpaintPipeline, StableDiffusionXLPAGPipeline, ) - from .paint_by_example import PaintByExamplePipeline - from .pia import PIAPipeline from .pixart_alpha import PixArtAlphaPipeline, PixArtSigmaPipeline from .prx import PRXPipeline from .qwenimage import ( @@ -783,7 +800,6 @@ SanaSprintPipeline, ) from .sana_video import SanaImageToVideoPipeline, SanaVideoPipeline - from .semantic_stable_diffusion import SemanticStableDiffusionPipeline from .shap_e import ShapEImg2ImgPipeline, ShapEPipeline from .stable_audio import StableAudioPipeline, StableAudioProjectionModel from .stable_cascade import ( @@ -809,13 +825,6 @@ StableDiffusion3InpaintPipeline, StableDiffusion3Pipeline, ) - from .stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline - from .stable_diffusion_diffedit import StableDiffusionDiffEditPipeline - from .stable_diffusion_gligen import StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline - from .stable_diffusion_ldm3d import 
StableDiffusionLDM3DPipeline - from .stable_diffusion_panorama import StableDiffusionPanoramaPipeline - from .stable_diffusion_safe import StableDiffusionPipelineSafe - from .stable_diffusion_sag import StableDiffusionSAGPipeline from .stable_diffusion_xl import ( StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, @@ -827,19 +836,6 @@ StableDiffusionAdapterPipeline, StableDiffusionXLAdapterPipeline, ) - from .text_to_video_synthesis import ( - TextToVideoSDPipeline, - TextToVideoZeroPipeline, - TextToVideoZeroSDXLPipeline, - VideoToVideoSDPipeline, - ) - from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline - from .unidiffuser import ( - ImageTextPipelineOutput, - UniDiffuserModel, - UniDiffuserPipeline, - UniDiffuserTextDecoder, - ) from .visualcloze import VisualClozeGenerationPipeline, VisualClozePipeline from .wan import ( WanAnimatePipeline, @@ -848,11 +844,6 @@ WanVACEPipeline, WanVideoToVideoPipeline, ) - from .wuerstchen import ( - WuerstchenCombinedPipeline, - WuerstchenDecoderPipeline, - WuerstchenPriorPipeline, - ) from .z_image import ( ZImageControlNetInpaintPipeline, ZImageControlNetPipeline, diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py index 4ca92b906842..482a6b52e19b 100644 --- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py +++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py @@ -20,9 +20,9 @@ from ...schedulers import PNDMScheduler from ...utils import is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor -from ..blip_diffusion.blip_image_processing import BlipImageProcessor -from ..blip_diffusion.modeling_blip2 import Blip2QFormerModel -from ..blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel +from ..deprecated.blip_diffusion.blip_image_processing import BlipImageProcessor +from ..deprecated.blip_diffusion.modeling_blip2 import Blip2QFormerModel +from ..deprecated.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput diff --git a/src/diffusers/pipelines/deprecated/__init__.py b/src/diffusers/pipelines/deprecated/__init__.py index 9936323170ad..3eec8e849592 100644 --- a/src/diffusers/pipelines/deprecated/__init__.py +++ b/src/diffusers/pipelines/deprecated/__init__.py @@ -23,6 +23,7 @@ _dummy_objects.update(get_objects_from_module(dummy_pt_objects)) else: + _import_structure["dance_diffusion"] = ["DanceDiffusionPipeline"] _import_structure["latent_diffusion_uncond"] = ["LDMPipeline"] _import_structure["pndm"] = ["PNDMPipeline"] _import_structure["repaint"] = ["RePaintPipeline"] @@ -49,6 +50,28 @@ "VersatileDiffusionTextToImagePipeline", ] _import_structure["vq_diffusion"] = ["VQDiffusionPipeline"] + _import_structure["amused"] = ["AmusedImg2ImgPipeline", "AmusedInpaintPipeline", "AmusedPipeline"] + _import_structure["audioldm"] = ["AudioLDMPipeline"] + _import_structure["blip_diffusion"] = ["BlipDiffusionPipeline"] + _import_structure["controlnet_xs"] = [ + "StableDiffusionControlNetXSPipeline", + "StableDiffusionXLControlNetXSPipeline", + ] + _import_structure["i2vgen_xl"] = ["I2VGenXLPipeline"] + _import_structure["musicldm"] = ["MusicLDMPipeline"] + _import_structure["paint_by_example"] = ["PaintByExamplePipeline"] + _import_structure["pia"] = ["PIAPipeline"] + _import_structure["semantic_stable_diffusion"] = 
["SemanticStableDiffusionPipeline"] + _import_structure["stable_diffusion_attend_and_excite"] = ["StableDiffusionAttendAndExcitePipeline"] + _import_structure["stable_diffusion_diffedit"] = ["StableDiffusionDiffEditPipeline"] + _import_structure["stable_diffusion_gligen"] = [ + "StableDiffusionGLIGENPipeline", + "StableDiffusionGLIGENTextImagePipeline", + ] + _import_structure["stable_diffusion_ldm3d"] = ["StableDiffusionLDM3DPipeline"] + _import_structure["stable_diffusion_panorama"] = ["StableDiffusionPanoramaPipeline"] + _import_structure["stable_diffusion_safe"] = ["StableDiffusionPipelineSafe"] + _import_structure["stable_diffusion_sag"] = ["StableDiffusionSAGPipeline"] _import_structure["stable_diffusion_variants"] = [ "CycleDiffusionPipeline", "StableDiffusionInpaintPipelineLegacy", @@ -56,6 +79,24 @@ "StableDiffusionParadigmsPipeline", "StableDiffusionModelEditingPipeline", ] + _import_structure["text_to_video_synthesis"] = [ + "TextToVideoSDPipeline", + "TextToVideoZeroPipeline", + "TextToVideoZeroSDXLPipeline", + "VideoToVideoSDPipeline", + ] + _import_structure["unclip"] = ["UnCLIPImageVariationPipeline", "UnCLIPPipeline"] + _import_structure["unidiffuser"] = [ + "ImageTextPipelineOutput", + "UniDiffuserModel", + "UniDiffuserPipeline", + "UniDiffuserTextDecoder", + ] + _import_structure["wuerstchen"] = [ + "WuerstchenCombinedPipeline", + "WuerstchenDecoderPipeline", + "WuerstchenPriorPipeline", + ] try: if not (is_torch_available() and is_librosa_available()): @@ -88,6 +129,7 @@ from ...utils.dummy_pt_objects import * else: + from .dance_diffusion import DanceDiffusionPipeline from .latent_diffusion_uncond import LDMPipeline from .pndm import PNDMPipeline from .repaint import RePaintPipeline @@ -102,8 +144,24 @@ else: from .alt_diffusion import AltDiffusionImg2ImgPipeline, AltDiffusionPipeline, AltDiffusionPipelineOutput + from .amused import AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline from .audio_diffusion import AudioDiffusionPipeline, Mel + from .audioldm import AudioLDMPipeline + from .blip_diffusion import BlipDiffusionPipeline + from .controlnet_xs import StableDiffusionControlNetXSPipeline, StableDiffusionXLControlNetXSPipeline + from .i2vgen_xl import I2VGenXLPipeline + from .musicldm import MusicLDMPipeline + from .paint_by_example import PaintByExamplePipeline + from .pia import PIAPipeline + from .semantic_stable_diffusion import SemanticStableDiffusionPipeline from .spectrogram_diffusion import SpectrogramDiffusionPipeline + from .stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline + from .stable_diffusion_diffedit import StableDiffusionDiffEditPipeline + from .stable_diffusion_gligen import StableDiffusionGLIGENPipeline, StableDiffusionGLIGENTextImagePipeline + from .stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline + from .stable_diffusion_panorama import StableDiffusionPanoramaPipeline + from .stable_diffusion_safe import StableDiffusionPipelineSafe + from .stable_diffusion_sag import StableDiffusionSAGPipeline from .stable_diffusion_variants import ( CycleDiffusionPipeline, StableDiffusionInpaintPipelineLegacy, @@ -112,6 +170,14 @@ StableDiffusionPix2PixZeroPipeline, ) from .stochastic_karras_ve import KarrasVePipeline + from .text_to_video_synthesis import ( + TextToVideoSDPipeline, + TextToVideoZeroPipeline, + TextToVideoZeroSDXLPipeline, + VideoToVideoSDPipeline, + ) + from .unclip import UnCLIPImageVariationPipeline, UnCLIPPipeline + from .unidiffuser import ImageTextPipelineOutput, UniDiffuserModel, 
UniDiffuserPipeline, UniDiffuserTextDecoder from .versatile_diffusion import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, @@ -119,6 +185,7 @@ VersatileDiffusionTextToImagePipeline, ) from .vq_diffusion import VQDiffusionPipeline + from .wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline, WuerstchenPriorPipeline try: if not (is_torch_available() and is_librosa_available()): diff --git a/src/diffusers/pipelines/amused/__init__.py b/src/diffusers/pipelines/deprecated/amused/__init__.py similarity index 91% rename from src/diffusers/pipelines/amused/__init__.py rename to src/diffusers/pipelines/deprecated/amused/__init__.py index 3c4d07a426b5..2812eadf6f99 100644 --- a/src/diffusers/pipelines/amused/__init__.py +++ b/src/diffusers/pipelines/deprecated/amused/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -16,7 +16,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline, @@ -40,7 +40,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( AmusedPipeline, ) else: diff --git a/src/diffusers/pipelines/amused/pipeline_amused.py b/src/diffusers/pipelines/deprecated/amused/pipeline_amused.py similarity index 98% rename from src/diffusers/pipelines/amused/pipeline_amused.py rename to src/diffusers/pipelines/deprecated/amused/pipeline_amused.py index b23adf0d2152..e1400d04116f 100644 --- a/src/diffusers/pipelines/amused/pipeline_amused.py +++ b/src/diffusers/pipelines/deprecated/amused/pipeline_amused.py @@ -17,11 +17,11 @@ import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...models import UVit2DModel, VQModel -from ...schedulers import AmusedScheduler -from ...utils import is_torch_xla_available, replace_example_docstring -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput +from ....image_processor import VaeImageProcessor +from ....models import UVit2DModel, VQModel +from ....schedulers import AmusedScheduler +from ....utils import is_torch_xla_available, replace_example_docstring +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/amused/pipeline_amused_img2img.py b/src/diffusers/pipelines/deprecated/amused/pipeline_amused_img2img.py similarity index 98% rename from src/diffusers/pipelines/amused/pipeline_amused_img2img.py rename to src/diffusers/pipelines/deprecated/amused/pipeline_amused_img2img.py index 79ebd96dedeb..1c64e7978b75 100644 --- a/src/diffusers/pipelines/amused/pipeline_amused_img2img.py +++ b/src/diffusers/pipelines/deprecated/amused/pipeline_amused_img2img.py @@ -17,11 +17,11 @@ import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...models import UVit2DModel, VQModel -from 
...schedulers import AmusedScheduler -from ...utils import is_torch_xla_available, replace_example_docstring -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....models import UVit2DModel, VQModel +from ....schedulers import AmusedScheduler +from ....utils import is_torch_xla_available, replace_example_docstring +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py b/src/diffusers/pipelines/deprecated/amused/pipeline_amused_inpaint.py similarity index 98% rename from src/diffusers/pipelines/amused/pipeline_amused_inpaint.py rename to src/diffusers/pipelines/deprecated/amused/pipeline_amused_inpaint.py index 55302401832c..3af0c9448914 100644 --- a/src/diffusers/pipelines/amused/pipeline_amused_inpaint.py +++ b/src/diffusers/pipelines/deprecated/amused/pipeline_amused_inpaint.py @@ -18,11 +18,11 @@ import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...models import UVit2DModel, VQModel -from ...schedulers import AmusedScheduler -from ...utils import is_torch_xla_available, replace_example_docstring -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....models import UVit2DModel, VQModel +from ....schedulers import AmusedScheduler +from ....utils import is_torch_xla_available, replace_example_docstring +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/audioldm/__init__.py b/src/diffusers/pipelines/deprecated/audioldm/__init__.py similarity index 88% rename from src/diffusers/pipelines/audioldm/__init__.py rename to src/diffusers/pipelines/deprecated/audioldm/__init__.py index a002b4aa72e0..75b11bf2789f 100644 --- a/src/diffusers/pipelines/audioldm/__init__.py +++ b/src/diffusers/pipelines/deprecated/audioldm/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( AudioLDMPipeline, ) @@ -31,7 +31,7 @@ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( AudioLDMPipeline, ) diff --git a/src/diffusers/pipelines/audioldm/pipeline_audioldm.py b/src/diffusers/pipelines/deprecated/audioldm/pipeline_audioldm.py similarity index 98% rename from src/diffusers/pipelines/audioldm/pipeline_audioldm.py rename to src/diffusers/pipelines/deprecated/audioldm/pipeline_audioldm.py index 357c3582b21c..16a66f2b9a2a 100644 --- a/src/diffusers/pipelines/audioldm/pipeline_audioldm.py +++ 
b/src/diffusers/pipelines/deprecated/audioldm/pipeline_audioldm.py @@ -20,11 +20,11 @@ import torch.nn.functional as F from transformers import ClapTextModelWithProjection, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import is_torch_xla_available, logging, replace_example_docstring -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import AudioPipelineOutput, DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import is_torch_xla_available, logging, replace_example_docstring +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import AudioPipelineOutput, DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/blip_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/blip_diffusion/__init__.py similarity index 73% rename from src/diffusers/pipelines/blip_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/blip_diffusion/__init__.py index c245313e2f8a..48ed40a4eee7 100644 --- a/src/diffusers/pipelines/blip_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/blip_diffusion/__init__.py @@ -4,14 +4,14 @@ import PIL from PIL import Image -from ...utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available +from ....utils import OptionalDependencyNotAvailable, is_torch_available, is_transformers_available try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline + from ....utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .blip_image_processing import BlipImageProcessor from .modeling_blip2 import Blip2QFormerModel diff --git a/src/diffusers/pipelines/blip_diffusion/blip_image_processing.py b/src/diffusers/pipelines/deprecated/blip_diffusion/blip_image_processing.py similarity index 100% rename from src/diffusers/pipelines/blip_diffusion/blip_image_processing.py rename to src/diffusers/pipelines/deprecated/blip_diffusion/blip_image_processing.py diff --git a/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py b/src/diffusers/pipelines/deprecated/blip_diffusion/modeling_blip2.py similarity index 100% rename from src/diffusers/pipelines/blip_diffusion/modeling_blip2.py rename to src/diffusers/pipelines/deprecated/blip_diffusion/modeling_blip2.py diff --git a/src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py b/src/diffusers/pipelines/deprecated/blip_diffusion/modeling_ctx_clip.py similarity index 100% rename from src/diffusers/pipelines/blip_diffusion/modeling_ctx_clip.py rename to src/diffusers/pipelines/deprecated/blip_diffusion/modeling_ctx_clip.py diff --git a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py b/src/diffusers/pipelines/deprecated/blip_diffusion/pipeline_blip_diffusion.py similarity index 97% rename from src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py rename to src/diffusers/pipelines/deprecated/blip_diffusion/pipeline_blip_diffusion.py index aa3dbdae966b..085300f74eef 100644 --- a/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py +++ 
b/src/diffusers/pipelines/deprecated/blip_diffusion/pipeline_blip_diffusion.py @@ -15,11 +15,11 @@ import torch from transformers import CLIPTokenizer -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import PNDMScheduler -from ...utils import is_torch_xla_available, logging, replace_example_docstring -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import PNDMScheduler +from ....utils import is_torch_xla_available, logging, replace_example_docstring +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput from .blip_image_processing import BlipImageProcessor from .modeling_blip2 import Blip2QFormerModel from .modeling_ctx_clip import ContextCLIPTextModel diff --git a/src/diffusers/pipelines/controlnet_xs/__init__.py b/src/diffusers/pipelines/deprecated/controlnet_xs/__init__.py similarity index 84% rename from src/diffusers/pipelines/controlnet_xs/__init__.py rename to src/diffusers/pipelines/deprecated/controlnet_xs/__init__.py index 978278b184f9..34950fb704f8 100644 --- a/src/diffusers/pipelines/controlnet_xs/__init__.py +++ b/src/diffusers/pipelines/deprecated/controlnet_xs/__init__.py @@ -1,68 +1,68 @@ -from typing import TYPE_CHECKING - -from ...utils import ( - DIFFUSERS_SLOW_IMPORT, - OptionalDependencyNotAvailable, - _LazyModule, - get_objects_from_module, - is_flax_available, - is_torch_available, - is_transformers_available, -) - - -_dummy_objects = {} -_import_structure = {} - -try: - if not (is_transformers_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 - - _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) -else: - _import_structure["pipeline_controlnet_xs"] = ["StableDiffusionControlNetXSPipeline"] - _import_structure["pipeline_controlnet_xs_sd_xl"] = ["StableDiffusionXLControlNetXSPipeline"] -try: - if not (is_transformers_available() and is_flax_available()): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - from ...utils import dummy_flax_and_transformers_objects # noqa F403 - - _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) -else: - pass # _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] - - -if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: - try: - if not (is_transformers_available() and is_torch_available()): - raise OptionalDependencyNotAvailable() - - except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * - else: - from .pipeline_controlnet_xs import StableDiffusionControlNetXSPipeline - from .pipeline_controlnet_xs_sd_xl import StableDiffusionXLControlNetXSPipeline - - try: - if not (is_transformers_available() and is_flax_available()): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 - else: - pass # from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline - - -else: - import sys - - sys.modules[__name__] = _LazyModule( - __name__, - globals()["__file__"], - _import_structure, - module_spec=__spec__, - ) - for name, value in 
_dummy_objects.items(): - setattr(sys.modules[__name__], name, value) +from typing import TYPE_CHECKING + +from ....utils import ( + DIFFUSERS_SLOW_IMPORT, + OptionalDependencyNotAvailable, + _LazyModule, + get_objects_from_module, + is_flax_available, + is_torch_available, + is_transformers_available, +) + + +_dummy_objects = {} +_import_structure = {} + +try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils import dummy_torch_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) +else: + _import_structure["pipeline_controlnet_xs"] = ["StableDiffusionControlNetXSPipeline"] + _import_structure["pipeline_controlnet_xs_sd_xl"] = ["StableDiffusionXLControlNetXSPipeline"] +try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + from ....utils import dummy_flax_and_transformers_objects # noqa F403 + + _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) +else: + pass # _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] + + +if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: + try: + if not (is_transformers_available() and is_torch_available()): + raise OptionalDependencyNotAvailable() + + except OptionalDependencyNotAvailable: + from ....utils.dummy_torch_and_transformers_objects import * + else: + from .pipeline_controlnet_xs import StableDiffusionControlNetXSPipeline + from .pipeline_controlnet_xs_sd_xl import StableDiffusionXLControlNetXSPipeline + + try: + if not (is_transformers_available() and is_flax_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from ....utils.dummy_flax_and_transformers_objects import * # noqa F403 + else: + pass # from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline + + +else: + import sys + + sys.modules[__name__] = _LazyModule( + __name__, + globals()["__file__"], + _import_structure, + module_spec=__spec__, + ) + for name, value in _dummy_objects.items(): + setattr(sys.modules[__name__], name, value) diff --git a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py b/src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs.py similarity index 98% rename from src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py rename to src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs.py index 9c81eb57e6c5..d3fe2488a922 100644 --- a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs.py +++ b/src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs.py @@ -21,13 +21,13 @@ import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...callbacks import MultiPipelineCallbacks, PipelineCallback -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....callbacks import MultiPipelineCallbacks, PipelineCallback +from ....image_processor import PipelineImageInput, VaeImageProcessor +from 
....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -36,10 +36,10 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import empty_device_cache, is_compiled_module, is_torch_version, randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion.pipeline_output import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import empty_device_cache, is_compiled_module, is_torch_version, randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.pipeline_output import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py b/src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs_sd_xl.py similarity index 98% rename from src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py rename to src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs_sd_xl.py index e7a5862b5b7a..ff1fb23a7d0b 100644 --- a/src/diffusers/pipelines/controlnet_xs/pipeline_controlnet_xs_sd_xl.py +++ b/src/diffusers/pipelines/deprecated/controlnet_xs/pipeline_controlnet_xs_sd_xl.py @@ -28,13 +28,13 @@ from diffusers.utils.import_utils import is_invisible_watermark_available -from ...callbacks import MultiPipelineCallbacks, PipelineCallback -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....callbacks import MultiPipelineCallbacks, PipelineCallback +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, logging, @@ -42,16 +42,16 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline -from ..stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput +from ....utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline +from ...stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput if is_invisible_watermark_available(): - from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + from 
...stable_diffusion_xl.watermark import StableDiffusionXLWatermarker -from ...utils import is_torch_xla_available +from ....utils import is_torch_xla_available if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/dance_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/dance_diffusion/__init__.py similarity index 87% rename from src/diffusers/pipelines/dance_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/dance_diffusion/__init__.py index 0d3e466dfa65..8dcd7467875f 100644 --- a/src/diffusers/pipelines/dance_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/dance_diffusion/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule +from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_dance_diffusion": ["DanceDiffusionPipeline"]} diff --git a/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py b/src/diffusers/pipelines/deprecated/dance_diffusion/pipeline_dance_diffusion.py similarity index 95% rename from src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py rename to src/diffusers/pipelines/deprecated/dance_diffusion/pipeline_dance_diffusion.py index eb8f8106061d..13936f035aaa 100644 --- a/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py +++ b/src/diffusers/pipelines/deprecated/dance_diffusion/pipeline_dance_diffusion.py @@ -15,11 +15,11 @@ import torch -from ...models import UNet1DModel -from ...schedulers import SchedulerMixin -from ...utils import is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import AudioPipelineOutput, DeprecatedPipelineMixin, DiffusionPipeline +from ....models import UNet1DModel +from ....schedulers import SchedulerMixin +from ....utils import is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import AudioPipelineOutput, DeprecatedPipelineMixin, DiffusionPipeline if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/i2vgen_xl/__init__.py b/src/diffusers/pipelines/deprecated/i2vgen_xl/__init__.py similarity index 86% rename from src/diffusers/pipelines/i2vgen_xl/__init__.py rename to src/diffusers/pipelines/deprecated/i2vgen_xl/__init__.py index b24a7e4cee7f..43646542d9ea 100644 --- a/src/diffusers/pipelines/i2vgen_xl/__init__.py +++ b/src/diffusers/pipelines/deprecated/i2vgen_xl/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -29,7 +29,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + from ....utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_i2vgen_xl import I2VGenXLPipeline diff --git a/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py b/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py similarity index 98% 
rename from src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py rename to src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py index 731ac27a0ff5..c81f5e4fdac9 100644 --- a/src/diffusers/pipelines/i2vgen_xl/pipeline_i2vgen_xl.py +++ b/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py @@ -21,19 +21,19 @@ import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...models import AutoencoderKL -from ...models.unets.unet_i2vgen_xl import I2VGenXLUNet -from ...schedulers import DDIMScheduler -from ...utils import ( +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....models import AutoencoderKL +from ....models.unets.unet_i2vgen_xl import I2VGenXLUNet +from ....schedulers import DDIMScheduler +from ....utils import ( BaseOutput, is_torch_xla_available, logging, replace_example_docstring, ) -from ...utils.torch_utils import randn_tensor -from ...video_processor import VideoProcessor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ....utils.torch_utils import randn_tensor +from ....video_processor import VideoProcessor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/musicldm/__init__.py b/src/diffusers/pipelines/deprecated/musicldm/__init__.py similarity index 88% rename from src/diffusers/pipelines/musicldm/__init__.py rename to src/diffusers/pipelines/deprecated/musicldm/__init__.py index ed71eeb1d99b..bc9f8d550401 100644 --- a/src/diffusers/pipelines/musicldm/__init__.py +++ b/src/diffusers/pipelines/deprecated/musicldm/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -31,7 +31,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_musicldm import MusicLDMPipeline diff --git a/src/diffusers/pipelines/musicldm/pipeline_musicldm.py b/src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py similarity index 98% rename from src/diffusers/pipelines/musicldm/pipeline_musicldm.py rename to src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py index e7747a4f8c3d..865b359f0690 100644 --- a/src/diffusers/pipelines/musicldm/pipeline_musicldm.py +++ b/src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py @@ -26,24 +26,24 @@ SpeechT5HifiGan, ) -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( is_accelerate_available, is_accelerate_version, is_librosa_available, 
logging, replace_example_docstring, ) -from ...utils.torch_utils import empty_device_cache, get_device, randn_tensor -from ..pipeline_utils import AudioPipelineOutput, DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ....utils.torch_utils import empty_device_cache, get_device, randn_tensor +from ...pipeline_utils import AudioPipelineOutput, DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin if is_librosa_available(): import librosa -from ...utils import is_torch_xla_available +from ....utils import is_torch_xla_available if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/paint_by_example/__init__.py b/src/diffusers/pipelines/deprecated/paint_by_example/__init__.py similarity index 89% rename from src/diffusers/pipelines/paint_by_example/__init__.py rename to src/diffusers/pipelines/deprecated/paint_by_example/__init__.py index d2906b540c6e..1441d87fe382 100644 --- a/src/diffusers/pipelines/paint_by_example/__init__.py +++ b/src/diffusers/pipelines/deprecated/paint_by_example/__init__.py @@ -5,7 +5,7 @@ import PIL from PIL import Image -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -22,7 +22,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -36,7 +36,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .image_encoder import PaintByExampleImageEncoder from .pipeline_paint_by_example import PaintByExamplePipeline diff --git a/src/diffusers/pipelines/paint_by_example/image_encoder.py b/src/diffusers/pipelines/deprecated/paint_by_example/image_encoder.py similarity index 96% rename from src/diffusers/pipelines/paint_by_example/image_encoder.py rename to src/diffusers/pipelines/deprecated/paint_by_example/image_encoder.py index 74c575ed8653..02c4515c97e1 100644 --- a/src/diffusers/pipelines/paint_by_example/image_encoder.py +++ b/src/diffusers/pipelines/deprecated/paint_by_example/image_encoder.py @@ -15,8 +15,8 @@ from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel -from ...models.attention import BasicTransformerBlock -from ...utils import logging +from ....models.attention import BasicTransformerBlock +from ....utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py b/src/diffusers/pipelines/deprecated/paint_by_example/pipeline_paint_by_example.py similarity index 98% rename from src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py rename to src/diffusers/pipelines/deprecated/paint_by_example/pipeline_paint_by_example.py index aa7dbaa720e5..32529266787f 100644 --- a/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +++ b/src/diffusers/pipelines/deprecated/paint_by_example/pipeline_paint_by_example.py @@ -20,14 +20,14 @@ import torch from transformers import CLIPImageProcessor -from ...image_processor import VaeImageProcessor -from ...models import AutoencoderKL, UNet2DConditionModel -from 
...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from ...utils import deprecate, is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, UNet2DConditionModel +from ....schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from ....utils import deprecate, is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker from .image_encoder import PaintByExampleImageEncoder diff --git a/src/diffusers/pipelines/pia/__init__.py b/src/diffusers/pipelines/deprecated/pia/__init__.py similarity index 88% rename from src/diffusers/pipelines/pia/__init__.py rename to src/diffusers/pipelines/deprecated/pia/__init__.py index 16e8004966e5..8d0cae93a642 100644 --- a/src/diffusers/pipelines/pia/__init__.py +++ b/src/diffusers/pipelines/deprecated/pia/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects + from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -28,7 +28,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_pia import PIAPipeline, PIAPipelineOutput diff --git a/src/diffusers/pipelines/pia/pipeline_pia.py b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py similarity index 98% rename from src/diffusers/pipelines/pia/pipeline_pia.py rename to src/diffusers/pipelines/deprecated/pia/pipeline_pia.py index d108deb9c5da..583b29c736bf 100644 --- a/src/diffusers/pipelines/pia/pipeline_pia.py +++ b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py @@ -21,12 +21,12 @@ import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput -from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...models.unets.unet_motion_model import MotionAdapter -from ...schedulers import ( +from ....image_processor import PipelineImageInput +from ....loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel +from ....models.lora import 
adjust_lora_scale_text_encoder +from ....models.unets.unet_motion_model import MotionAdapter +from ....schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, @@ -34,7 +34,7 @@ LMSDiscreteScheduler, PNDMScheduler, ) -from ...utils import ( +from ....utils import ( USE_PEFT_BACKEND, BaseOutput, is_torch_xla_available, @@ -43,10 +43,10 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ...video_processor import VideoProcessor -from ..free_init_utils import FreeInitMixin -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ....utils.torch_utils import randn_tensor +from ....video_processor import VideoProcessor +from ...free_init_utils import FreeInitMixin +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/__init__.py similarity index 88% rename from src/diffusers/pipelines/semantic_stable_diffusion/__init__.py rename to src/diffusers/pipelines/deprecated/semantic_stable_diffusion/__init__.py index 70f5b1a547c4..f55af15469fa 100644 --- a/src/diffusers/pipelines/semantic_stable_diffusion/__init__.py +++ b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -31,7 +31,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline diff --git a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_output.py similarity index 95% rename from src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py rename to src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_output.py index 8e5429ce2a8d..b9f5cfb8ddd8 100644 --- a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_output.py +++ b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_output.py @@ -3,7 +3,7 @@ import numpy as np import PIL.Image -from ...utils import BaseOutput +from ....utils import BaseOutput @dataclass diff --git a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py similarity index 98% rename from src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py rename to src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py index 05d28896e117..bb3009d238a4 100644 --- 
a/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py +++ b/src/diffusers/pipelines/deprecated/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py @@ -5,13 +5,13 @@ import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...models import AutoencoderKL, UNet2DConditionModel -from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import deprecate, is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ....image_processor import VaeImageProcessor +from ....models import AutoencoderKL, UNet2DConditionModel +from ....pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from .pipeline_output import SemanticStableDiffusionPipelineOutput diff --git a/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/__init__.py index cce556fceb23..2087f09ea580 100644 --- a/src/diffusers/pipelines/stable_diffusion_attend_and_excite/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline diff --git a/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py b/src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py index 80b0c09bc9a5..20240d07dfa5 100644 --- a/src/diffusers/pipelines/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_attend_and_excite/pipeline_stable_diffusion_attend_and_excite.py @@ -21,13 +21,13 @@ from torch.nn 
import functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention_processor import Attention -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention_processor import Attention +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -36,10 +36,10 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/__init__.py index e2145edb96c6..3924c610274c 100644 --- a/src/diffusers/pipelines/stable_diffusion_diffedit/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline diff --git a/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py b/src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py index 43bc2eb955c7..ee8675678f2d 100644 --- a/src/diffusers/pipelines/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py +++ 
b/src/diffusers/pipelines/deprecated/stable_diffusion_diffedit/pipeline_stable_diffusion_diffedit.py @@ -22,13 +22,13 @@ from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...configuration_utils import FrozenDict -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers -from ...utils import ( +from ....configuration_utils import FrozenDict +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMInverseScheduler, KarrasDiffusionSchedulers +from ....utils import ( PIL_INTERPOLATION, USE_PEFT_BACKEND, BaseOutput, @@ -39,10 +39,10 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/__init__.py similarity index 89% rename from src/diffusers/pipelines/stable_diffusion_gligen/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_gligen/__init__.py index 147980cbf9e5..81c8b8b99cd8 100644 --- a/src/diffusers/pipelines/stable_diffusion_gligen/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -31,7 +31,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_gligen import StableDiffusionGLIGENPipeline from .pipeline_stable_diffusion_gligen_text_image import StableDiffusionGLIGENTextImagePipeline diff --git a/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py rename to 
src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py index fa5bc9376e53..ce5d3397ed47 100644 --- a/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen.py @@ -20,13 +20,13 @@ import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention import GatedSelfAttentionDense -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention import GatedSelfAttentionDense +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -35,10 +35,10 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py index 62e8a9fa95ae..d72d12a64945 100644 --- a/src/diffusers/pipelines/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_gligen/pipeline_stable_diffusion_gligen_text_image.py @@ -26,13 +26,13 @@ CLIPVisionModelWithProjection, ) -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention import GatedSelfAttentionDense -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.attention import GatedSelfAttentionDense +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, is_torch_xla_available, logging, @@ -40,11 +40,11 @@ scale_lora_layers, 
unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.clip_image_project_model import CLIPImageProjection -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.clip_image_project_model import CLIPImageProjection +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/__init__.py index dae2affddd1f..a2fcf3ab8369 100644 --- a/src/diffusers/pipelines/stable_diffusion_ldm3d/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline diff --git a/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py index 6de144aa7e8b..17c60b6a147d 100644 --- a/src/diffusers/pipelines/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py @@ -21,12 +21,12 @@ import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput, VaeImageProcessorLDM3D -from ...loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import PipelineImageInput, VaeImageProcessorLDM3D +from ....loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, 
ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, BaseOutput, deprecate, @@ -36,9 +36,9 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_panorama/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_panorama/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_panorama/__init__.py index f7572db7236c..ce0601ed2649 100644 --- a/src/diffusers/pipelines/stable_diffusion_panorama/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_panorama/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline diff --git a/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py b/src/diffusers/pipelines/deprecated/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py index 259fbd933430..481c9c93ddde 100644 --- a/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py @@ -18,12 +18,12 @@ import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import DDIMScheduler -from ...utils import ( +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, 
UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import DDIMScheduler +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -32,10 +32,10 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/stable_diffusion_safe/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/__init__.py similarity index 94% rename from src/diffusers/pipelines/stable_diffusion_safe/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_safe/__init__.py index b35015a9f729..e911109b2e6e 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/__init__.py @@ -6,7 +6,7 @@ import PIL from PIL import Image -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, BaseOutput, OptionalDependencyNotAvailable, @@ -59,7 +59,7 @@ class SafetyConfig(object): if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects + from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -77,7 +77,7 @@ class SafetyConfig(object): if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_output import StableDiffusionSafePipelineOutput from .pipeline_stable_diffusion_safe import StableDiffusionPipelineSafe diff --git a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_output.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_output.py index 6b784bb0e102..21fc7fec07d1 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_output.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_output.py @@ -3,7 +3,7 @@ import numpy as np import PIL.Image -from ...utils import ( +from ....utils import ( BaseOutput, ) diff --git a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py index 26bb5128ba9b..35c7f9b970b9 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +++ 
b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/pipeline_stable_diffusion_safe.py @@ -7,14 +7,14 @@ from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...configuration_utils import FrozenDict -from ...image_processor import PipelineImageInput -from ...loaders import IPAdapterMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import deprecate, is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ....configuration_utils import FrozenDict +from ....image_processor import PipelineImageInput +from ....loaders import IPAdapterMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import deprecate, is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from . import StableDiffusionSafePipelineOutput from .safety_checker import SafeStableDiffusionSafetyChecker diff --git a/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/safety_checker.py similarity index 99% rename from src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_safe/safety_checker.py index 1f6ad5f2a348..792e4596b156 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_safe/safety_checker.py @@ -16,7 +16,7 @@ import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel -from ...utils import logging +from ....utils import logging logger = logging.get_logger(__name__) diff --git a/src/diffusers/pipelines/stable_diffusion_sag/__init__.py b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/__init__.py similarity index 87% rename from src/diffusers/pipelines/stable_diffusion_sag/__init__.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_sag/__init__.py index 378e0e57817f..8cdd1ec6bdf0 100644 --- a/src/diffusers/pipelines/stable_diffusion_sag/__init__.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -18,7 +18,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -30,7 +30,7 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * + from ....utils.dummy_torch_and_transformers_objects import * else: from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline diff --git a/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py 
b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py similarity index 98% rename from src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py rename to src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py index 3cf604911f0b..678ef74f387c 100644 --- a/src/diffusers/pipelines/stable_diffusion_sag/pipeline_stable_diffusion_sag.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_sag/pipeline_stable_diffusion_sag.py @@ -19,12 +19,12 @@ import torch.nn.functional as F from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection -from ...image_processor import PipelineImageInput, VaeImageProcessor -from ...loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import PipelineImageInput, VaeImageProcessor +from ....loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -33,10 +33,10 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion import StableDiffusionPipelineOutput +from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/text_to_video_synthesis/__init__.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/__init__.py similarity index 90% rename from src/diffusers/pipelines/text_to_video_synthesis/__init__.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/__init__.py index 8d8fdb92769b..6c32f5f16cae 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/__init__.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects # noqa F403 + from ....utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -33,7 +33,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + from ....utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_output 
import TextToVideoSDPipelineOutput from .pipeline_text_to_video_synth import TextToVideoSDPipeline diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_output.py similarity index 96% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_output.py index c94c5d2d144a..c93609a2dd6f 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_output.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_output.py @@ -4,7 +4,7 @@ import PIL import torch -from ...utils import ( +from ....utils import ( BaseOutput, ) diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py similarity index 98% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py index 0ca64d33acda..f67008fb98c3 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth.py @@ -18,11 +18,11 @@ import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet3DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet3DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -31,9 +31,9 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ...video_processor import VideoProcessor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ....utils.torch_utils import randn_tensor +from ....video_processor import VideoProcessor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from . 
import TextToVideoSDPipelineOutput diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py similarity index 98% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py index 6908f51eb21b..a823760368e5 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py @@ -19,11 +19,11 @@ import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet3DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet3DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -32,9 +32,9 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ...video_processor import VideoProcessor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ....utils.torch_utils import randn_tensor +from ....video_processor import VideoProcessor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin from . 
import TextToVideoSDPipelineOutput diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py similarity index 98% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py index 66defb2f3745..6ea24ae2c817 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero.py @@ -10,12 +10,12 @@ from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer -from ...image_processor import VaeImageProcessor -from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, BaseOutput, is_torch_xla_available, @@ -23,9 +23,9 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.torch_utils import empty_device_cache, randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin -from ..stable_diffusion import StableDiffusionSafetyChecker +from ....utils.torch_utils import empty_device_cache, randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ...stable_diffusion import StableDiffusionSafetyChecker if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py similarity index 99% rename from src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py rename to src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py index a3286cd940fd..8e2c6e572295 100644 --- a/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py @@ -16,12 +16,12 @@ CLIPVisionModelWithProjection, ) -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL, UNet2DConditionModel +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, BaseOutput, deprecate, @@ -30,15 +30,15 @@ scale_lora_layers, 
unscale_lora_layers, ) -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin if is_invisible_watermark_available(): - from ..stable_diffusion_xl.watermark import StableDiffusionXLWatermarker + from ...stable_diffusion_xl.watermark import StableDiffusionXLWatermarker -from ...utils import is_torch_xla_available +from ....utils import is_torch_xla_available if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/unclip/__init__.py b/src/diffusers/pipelines/deprecated/unclip/__init__.py similarity index 87% rename from src/diffusers/pipelines/unclip/__init__.py rename to src/diffusers/pipelines/deprecated/unclip/__init__.py index c89e899463be..7444df491273 100644 --- a/src/diffusers/pipelines/unclip/__init__.py +++ b/src/diffusers/pipelines/deprecated/unclip/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline + from ....utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline _dummy_objects.update( {"UnCLIPImageVariationPipeline": UnCLIPImageVariationPipeline, "UnCLIPPipeline": UnCLIPPipeline} @@ -33,7 +33,7 @@ if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + from ....utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline diff --git a/src/diffusers/pipelines/unclip/pipeline_unclip.py b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip.py similarity index 98% rename from src/diffusers/pipelines/unclip/pipeline_unclip.py rename to src/diffusers/pipelines/deprecated/unclip/pipeline_unclip.py index 430f1a1e5265..cf3697c6354f 100644 --- a/src/diffusers/pipelines/unclip/pipeline_unclip.py +++ b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip.py @@ -19,11 +19,11 @@ from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput -from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel -from ...schedulers import UnCLIPScheduler -from ...utils import is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput +from ....models import PriorTransformer, UNet2DConditionModel, UNet2DModel +from ....schedulers import UnCLIPScheduler +from ....utils import is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput from .text_proj import UnCLIPTextProjModel diff --git a/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py 
b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py similarity index 98% rename from src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py rename to src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py index d0d8bdc44787..eb16217bc39f 100644 --- a/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +++ b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py @@ -24,11 +24,11 @@ CLIPVisionModelWithProjection, ) -from ...models import UNet2DConditionModel, UNet2DModel -from ...schedulers import UnCLIPScheduler -from ...utils import is_torch_xla_available, logging -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput +from ....models import UNet2DConditionModel, UNet2DModel +from ....schedulers import UnCLIPScheduler +from ....utils import is_torch_xla_available, logging +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput from .text_proj import UnCLIPTextProjModel diff --git a/src/diffusers/pipelines/unclip/text_proj.py b/src/diffusers/pipelines/deprecated/unclip/text_proj.py similarity index 97% rename from src/diffusers/pipelines/unclip/text_proj.py rename to src/diffusers/pipelines/deprecated/unclip/text_proj.py index 5e04e48ba621..5493df794acc 100644 --- a/src/diffusers/pipelines/unclip/text_proj.py +++ b/src/diffusers/pipelines/deprecated/unclip/text_proj.py @@ -15,8 +15,8 @@ import torch from torch import nn -from ...configuration_utils import ConfigMixin, register_to_config -from ...models import ModelMixin +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin class UnCLIPTextProjModel(ModelMixin, ConfigMixin): diff --git a/src/diffusers/pipelines/unidiffuser/__init__.py b/src/diffusers/pipelines/deprecated/unidiffuser/__init__.py similarity index 91% rename from src/diffusers/pipelines/unidiffuser/__init__.py rename to src/diffusers/pipelines/deprecated/unidiffuser/__init__.py index 1ac2b09a6e57..aeaba167dc11 100644 --- a/src/diffusers/pipelines/unidiffuser/__init__.py +++ b/src/diffusers/pipelines/deprecated/unidiffuser/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -16,7 +16,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) @@ -35,7 +35,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import ( + from ....utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) diff --git a/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py b/src/diffusers/pipelines/deprecated/unidiffuser/modeling_text_decoder.py similarity index 99% rename from src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py rename to src/diffusers/pipelines/deprecated/unidiffuser/modeling_text_decoder.py index c68b5d9ab5a8..a068f99c6368 100644 --- 
a/src/diffusers/pipelines/unidiffuser/modeling_text_decoder.py +++ b/src/diffusers/pipelines/deprecated/unidiffuser/modeling_text_decoder.py @@ -4,8 +4,8 @@ from transformers import GPT2Config, GPT2LMHeadModel from transformers.modeling_utils import ModuleUtilsMixin -from ...configuration_utils import ConfigMixin, register_to_config -from ...models import ModelMixin +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin # Modified from ClipCaptionModel in https://github.com/thu-ml/unidiffuser/blob/main/libs/caption_decoder.py diff --git a/src/diffusers/pipelines/unidiffuser/modeling_uvit.py b/src/diffusers/pipelines/deprecated/unidiffuser/modeling_uvit.py similarity index 99% rename from src/diffusers/pipelines/unidiffuser/modeling_uvit.py rename to src/diffusers/pipelines/deprecated/unidiffuser/modeling_uvit.py index 125188196c1e..6fd4ff50285f 100644 --- a/src/diffusers/pipelines/unidiffuser/modeling_uvit.py +++ b/src/diffusers/pipelines/deprecated/unidiffuser/modeling_uvit.py @@ -3,14 +3,14 @@ import torch from torch import nn -from ...configuration_utils import ConfigMixin, register_to_config -from ...models import ModelMixin -from ...models.attention import FeedForward -from ...models.attention_processor import Attention -from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed -from ...models.modeling_outputs import Transformer2DModelOutput -from ...models.normalization import AdaLayerNorm -from ...utils import logging +from ....configuration_utils import ConfigMixin, register_to_config +from ....models import ModelMixin +from ....models.attention import FeedForward +from ....models.attention_processor import Attention +from ....models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed +from ....models.modeling_outputs import Transformer2DModelOutput +from ....models.normalization import AdaLayerNorm +from ....utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py b/src/diffusers/pipelines/deprecated/unidiffuser/pipeline_unidiffuser.py similarity index 99% rename from src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py rename to src/diffusers/pipelines/deprecated/unidiffuser/pipeline_unidiffuser.py index 81d2ce95dc53..7e55075cc209 100644 --- a/src/diffusers/pipelines/unidiffuser/pipeline_unidiffuser.py +++ b/src/diffusers/pipelines/deprecated/unidiffuser/pipeline_unidiffuser.py @@ -13,12 +13,12 @@ GPT2Tokenizer, ) -from ...image_processor import VaeImageProcessor -from ...loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL -from ...models.lora import adjust_lora_scale_text_encoder -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( +from ....image_processor import VaeImageProcessor +from ....loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....models import AutoencoderKL +from ....models.lora import adjust_lora_scale_text_encoder +from ....schedulers import KarrasDiffusionSchedulers +from ....utils import ( USE_PEFT_BACKEND, deprecate, is_torch_xla_available, @@ -26,9 +26,9 @@ scale_lora_layers, unscale_lora_layers, ) -from ...utils.outputs import BaseOutput -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline +from ....utils.outputs import BaseOutput +from ....utils.torch_utils import 
randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel diff --git a/src/diffusers/pipelines/wuerstchen/__init__.py b/src/diffusers/pipelines/deprecated/wuerstchen/__init__.py similarity index 91% rename from src/diffusers/pipelines/wuerstchen/__init__.py rename to src/diffusers/pipelines/deprecated/wuerstchen/__init__.py index ddb852d19315..26f259512cbd 100644 --- a/src/diffusers/pipelines/wuerstchen/__init__.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/__init__.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING -from ...utils import ( +from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, @@ -17,7 +17,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils import dummy_torch_and_transformers_objects + from ....utils import dummy_torch_and_transformers_objects _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: @@ -34,7 +34,7 @@ if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 + from ....utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt diff --git a/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_paella_vq_model.py similarity index 95% rename from src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py rename to src/diffusers/pipelines/deprecated/wuerstchen/modeling_paella_vq_model.py index 932c7ac618f6..dd9f2c153e21 100644 --- a/src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_paella_vq_model.py @@ -17,11 +17,11 @@ import torch import torch.nn as nn -from ...configuration_utils import ConfigMixin, register_to_config -from ...models.autoencoders.vae import DecoderOutput, VectorQuantizer -from ...models.modeling_utils import ModelMixin -from ...models.vq_model import VQEncoderOutput -from ...utils.accelerate_utils import apply_forward_hook +from ....configuration_utils import ConfigMixin, register_to_config +from ....models.autoencoders.vae import DecoderOutput, VectorQuantizer +from ....models.modeling_utils import ModelMixin +from ....models.vq_model import VQEncoderOutput +from ....utils.accelerate_utils import apply_forward_hook class MixingResidualBlock(nn.Module): diff --git a/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_common.py similarity index 98% rename from src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py rename to src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_common.py index 73e71b3076fb..7645a5579c58 100644 --- a/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_common.py @@ -1,7 +1,7 @@ import torch import torch.nn as nn -from ...models.attention_processor import Attention +from ....models.attention_processor import Attention class WuerstchenLayerNorm(nn.LayerNorm): diff --git 
a/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_diffnext.py similarity index 98% rename from src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py rename to src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_diffnext.py index 77ae597655d1..31edf78aada3 100644 --- a/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_diffnext.py @@ -19,8 +19,8 @@ import torch import torch.nn as nn -from ...configuration_utils import ConfigMixin, register_to_config -from ...models.modeling_utils import ModelMixin +from ....configuration_utils import ConfigMixin, register_to_config +from ....models.modeling_utils import ModelMixin from .modeling_wuerstchen_common import AttnBlock, GlobalResponseNorm, TimestepBlock, WuerstchenLayerNorm diff --git a/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_prior.py similarity index 93% rename from src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py rename to src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_prior.py index dbdd50871b43..64e140f25bb9 100644 --- a/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_prior.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/modeling_wuerstchen_prior.py @@ -18,16 +18,16 @@ import torch import torch.nn as nn -from ...configuration_utils import ConfigMixin, register_to_config -from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin -from ...models.attention import AttentionMixin -from ...models.attention_processor import ( +from ....configuration_utils import ConfigMixin, register_to_config +from ....loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin +from ....models.attention import AttentionMixin +from ....models.attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttnAddedKVProcessor, AttnProcessor, ) -from ...models.modeling_utils import ModelMixin +from ....models.modeling_utils import ModelMixin from .modeling_wuerstchen_common import AttnBlock, ResBlock, TimestepBlock, WuerstchenLayerNorm diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py similarity index 98% rename from src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py rename to src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py index cce05c189201..0e8b1539f633 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py @@ -18,10 +18,10 @@ import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...schedulers import DDPMWuerstchenScheduler -from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput +from ....schedulers import DDPMWuerstchenScheduler +from ....utils import deprecate, is_torch_xla_available, logging, replace_example_docstring +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt diff --git 
a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_combined.py similarity index 98% rename from src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py rename to src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_combined.py index 16300a7c71d2..dedeeedfef8f 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_combined.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_combined.py @@ -16,9 +16,9 @@ import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...schedulers import DDPMWuerstchenScheduler -from ...utils import deprecate, replace_example_docstring -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline +from ....schedulers import DDPMWuerstchenScheduler +from ....utils import deprecate, replace_example_docstring +from ...pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt from .modeling_wuerstchen_prior import WuerstchenPrior diff --git a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py similarity index 98% rename from src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py rename to src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py index e79fcf8378aa..ffcc8fb2051d 100644 --- a/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen_prior.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py @@ -20,11 +20,11 @@ import torch from transformers import CLIPTextModel, CLIPTokenizer -from ...loaders import StableDiffusionLoraLoaderMixin -from ...schedulers import DDPMWuerstchenScheduler -from ...utils import BaseOutput, deprecate, is_torch_xla_available, logging, replace_example_docstring -from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DiffusionPipeline +from ....loaders import StableDiffusionLoraLoaderMixin +from ....schedulers import DDPMWuerstchenScheduler +from ....utils import BaseOutput, deprecate, is_torch_xla_available, logging, replace_example_docstring +from ....utils.torch_utils import randn_tensor +from ...pipeline_utils import DiffusionPipeline from .modeling_wuerstchen_prior import WuerstchenPrior diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py index ef40078bfbb9..aff5031a54ee 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py @@ -22,7 +22,7 @@ from ...utils import is_torch_version, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput -from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel +from ..deprecated.wuerstchen.modeling_paella_vq_model import PaellaVQModel if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py index 50e6c02b6017..e302ac4120d6 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +++ 
b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py @@ -21,7 +21,7 @@ from ...schedulers import DDPMWuerstchenScheduler from ...utils import is_torch_version, replace_example_docstring from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline -from ..wuerstchen.modeling_paella_vq_model import PaellaVQModel +from ..deprecated.wuerstchen.modeling_paella_vq_model import PaellaVQModel from .pipeline_stable_cascade import StableCascadeDecoderPipeline from .pipeline_stable_cascade_prior import StableCascadePriorPipeline diff --git a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py index 8fa4bd5941d2..30c1a8aa5fa2 100644 --- a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +++ b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py @@ -54,7 +54,7 @@ from ...utils.constants import DIFFUSERS_REQUEST_TIMEOUT from ...utils.torch_utils import get_device from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel -from ..paint_by_example import PaintByExampleImageEncoder +from ..deprecated.paint_by_example import PaintByExampleImageEncoder from ..pipeline_utils import DiffusionPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer From 8e2cdbfb2c43655427c01c1208fe0708b4202fff Mon Sep 17 00:00:00 2001 From: stevhliu Date: Tue, 17 Feb 2026 12:07:34 -0800 Subject: [PATCH 2/8] fix --- src/diffusers/pipelines/__init__.py | 4 ++-- src/diffusers/pipelines/auto_pipeline.py | 2 +- src/diffusers/pipelines/deprecated/pia/pipeline_pia.py | 7 ++++++- .../pipeline_stable_diffusion_ldm3d.py | 7 ++++++- .../pipelines/stable_cascade/pipeline_stable_cascade.py | 2 +- .../stable_cascade/pipeline_stable_cascade_combined.py | 2 +- .../pipelines/stable_diffusion/convert_from_ckpt.py | 2 +- 7 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py index 1011e7f5d05a..788c413dd414 100644 --- a/src/diffusers/pipelines/__init__.py +++ b/src/diffusers/pipelines/__init__.py @@ -630,8 +630,8 @@ I2VGenXLPipeline, ImageTextPipelineOutput, MusicLDMPipeline, - PIAPipeline, PaintByExamplePipeline, + PIAPipeline, SemanticStableDiffusionPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetXSPipeline, @@ -655,12 +655,12 @@ UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, - VQDiffusionPipeline, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, + VQDiffusionPipeline, WuerstchenCombinedPipeline, WuerstchenDecoderPipeline, WuerstchenPriorPipeline, diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index 7247ca0d161c..569e148e8e05 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -122,7 +122,7 @@ StableDiffusionXLPipeline, ) from .wan import WanImageToVideoPipeline, WanPipeline, WanVideoToVideoPipeline -from .wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline +from .deprecated.wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline from .z_image import ( ZImageControlNetInpaintPipeline, ZImageControlNetPipeline, diff --git a/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py 
index 583b29c736bf..6ea5198eac9b 100644 --- a/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py +++ b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py @@ -22,7 +22,12 @@ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ....image_processor import PipelineImageInput -from ....loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + TextualInversionLoaderMixin, +) from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel from ....models.lora import adjust_lora_scale_text_encoder from ....models.unets.unet_motion_model import MotionAdapter diff --git a/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py index 17c60b6a147d..16b21dd66132 100644 --- a/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py +++ b/src/diffusers/pipelines/deprecated/stable_diffusion_ldm3d/pipeline_stable_diffusion_ldm3d.py @@ -22,7 +22,12 @@ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ....image_processor import PipelineImageInput, VaeImageProcessorLDM3D -from ....loaders import FromSingleFileMixin, IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin +from ....loaders import ( + FromSingleFileMixin, + IPAdapterMixin, + StableDiffusionLoraLoaderMixin, + TextualInversionLoaderMixin, +) from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import KarrasDiffusionSchedulers diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py index aff5031a54ee..6a4066eb6e17 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade.py @@ -21,8 +21,8 @@ from ...schedulers import DDPMWuerstchenScheduler from ...utils import is_torch_version, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput from ..deprecated.wuerstchen.modeling_paella_vq_model import PaellaVQModel +from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput if is_torch_xla_available(): diff --git a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py index e302ac4120d6..0afecad097da 100644 --- a/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py +++ b/src/diffusers/pipelines/stable_cascade/pipeline_stable_cascade_combined.py @@ -20,8 +20,8 @@ from ...models import StableCascadeUNet from ...schedulers import DDPMWuerstchenScheduler from ...utils import is_torch_version, replace_example_docstring -from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline from ..deprecated.wuerstchen.modeling_paella_vq_model import PaellaVQModel +from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline from .pipeline_stable_cascade import StableCascadeDecoderPipeline 
from .pipeline_stable_cascade_prior import StableCascadePriorPipeline diff --git a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py index 30c1a8aa5fa2..0c8fd842fcba 100644 --- a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +++ b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py @@ -53,8 +53,8 @@ from ...utils import is_accelerate_available, logging from ...utils.constants import DIFFUSERS_REQUEST_TIMEOUT from ...utils.torch_utils import get_device -from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel from ..deprecated.paint_by_example import PaintByExampleImageEncoder +from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel from ..pipeline_utils import DiffusionPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer From a6bc44621785720898202da20bbedde372cd179f Mon Sep 17 00:00:00 2001 From: stevhliu Date: Tue, 17 Feb 2026 12:10:29 -0800 Subject: [PATCH 3/8] fix --- src/diffusers/pipelines/auto_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/auto_pipeline.py b/src/diffusers/pipelines/auto_pipeline.py index 569e148e8e05..c7c11899d9f6 100644 --- a/src/diffusers/pipelines/auto_pipeline.py +++ b/src/diffusers/pipelines/auto_pipeline.py @@ -40,6 +40,7 @@ StableDiffusion3ControlNetPipeline, ) from .deepfloyd_if import IFImg2ImgPipeline, IFInpaintingPipeline, IFPipeline +from .deprecated.wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline from .flux import ( FluxControlImg2ImgPipeline, FluxControlInpaintPipeline, @@ -122,7 +123,6 @@ StableDiffusionXLPipeline, ) from .wan import WanImageToVideoPipeline, WanPipeline, WanVideoToVideoPipeline -from .deprecated.wuerstchen import WuerstchenCombinedPipeline, WuerstchenDecoderPipeline from .z_image import ( ZImageControlNetInpaintPipeline, ZImageControlNetPipeline, From acea5cf56e971a42ed80375000106c79f698a76f Mon Sep 17 00:00:00 2001 From: stevhliu Date: Tue, 17 Feb 2026 12:13:55 -0800 Subject: [PATCH 4/8] fix --- .../models/unets/unet_stable_cascade.py | 2 +- .../animatediff/pipeline_animatediff_sdxl.py | 4 ++-- .../pipeline_animatediff_sparsectrl.py | 4 ++-- .../pipelines/audioldm2/pipeline_audioldm2.py | 2 +- .../i2vgen_xl/pipeline_i2vgen_xl.py | 2 +- .../deprecated/musicldm/pipeline_musicldm.py | 6 ++--- .../pipelines/deprecated/pia/pipeline_pia.py | 4 ++-- .../pipeline_text_to_video_synth_img2img.py | 2 +- .../pipeline_text_to_video_zero_sdxl.py | 22 +++++++++---------- .../unclip/pipeline_unclip_image_variation.py | 2 +- .../wuerstchen/pipeline_wuerstchen.py | 2 +- .../wuerstchen/pipeline_wuerstchen_prior.py | 2 +- .../pipelines/kandinsky/pipeline_kandinsky.py | 2 +- .../kandinsky/pipeline_kandinsky_inpaint.py | 2 +- .../kandinsky/pipeline_kandinsky_prior.py | 2 +- .../kandinsky2_2/pipeline_kandinsky2_2.py | 2 +- .../pipeline_kandinsky2_2_controlnet.py | 2 +- .../pipeline_kandinsky2_2_inpainting.py | 2 +- .../pipeline_kandinsky2_2_prior.py | 2 +- .../pipelines/latte/pipeline_latte.py | 2 +- .../pag/pipeline_pag_sd_animatediff.py | 2 +- .../pipelines/shap_e/pipeline_shap_e.py | 2 +- .../shap_e/pipeline_shap_e_img2img.py | 2 +- .../pipeline_stable_unclip.py | 4 ++-- 24 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/diffusers/models/unets/unet_stable_cascade.py b/src/diffusers/models/unets/unet_stable_cascade.py 
index 5a6f24ab794b..af98b7a1c602 100644 --- a/src/diffusers/models/unets/unet_stable_cascade.py +++ b/src/diffusers/models/unets/unet_stable_cascade.py @@ -26,7 +26,7 @@ from ..modeling_utils import ModelMixin -# Copied from diffusers.pipelines.wuerstchen.modeling_wuerstchen_common.WuerstchenLayerNorm with WuerstchenLayerNorm -> SDCascadeLayerNorm +# Copied from diffusers.pipelines.deprecated.wuerstchen.modeling_wuerstchen_common.WuerstchenLayerNorm with WuerstchenLayerNorm -> SDCascadeLayerNorm class SDCascadeLayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) diff --git a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py index 68ce7c92896a..f0474487bce9 100644 --- a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py +++ b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py @@ -633,7 +633,7 @@ def prepare_ip_adapter_image_embeds( return ip_adapter_image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents @@ -736,7 +736,7 @@ def check_inputs( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py index 8b3eb8fc3c03..14605307e18c 100644 --- a/src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py +++ b/src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py @@ -458,7 +458,7 @@ def prepare_ip_adapter_image_embeds( return ip_adapter_image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents @@ -621,7 +621,7 @@ def check_image(self, image, prompt, prompt_embeds): f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py b/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py index 5ba3bb40b52c..10ef186a5913 100644 --- a/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py +++ b/src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py @@ -685,7 +685,7 @@ def encode_prompt( return prompt_embeds, attention_mask, generated_prompt_embeds - # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform + # Copied from diffusers.pipelines.deprecated.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform def mel_spectrogram_to_waveform(self, mel_spectrogram): if mel_spectrogram.dim() == 4: mel_spectrogram = mel_spectrogram.squeeze(1) diff --git a/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py b/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py index c81f5e4fdac9..7712743e6bdd 100644 --- a/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py +++ b/src/diffusers/pipelines/deprecated/i2vgen_xl/pipeline_i2vgen_xl.py @@ -481,7 +481,7 @@ def prepare_image_latents( return image_latents - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py b/src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py index 865b359f0690..2173699a7a6b 100644 --- a/src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py +++ b/src/diffusers/pipelines/deprecated/musicldm/pipeline_musicldm.py @@ -259,7 +259,7 @@ def _encode_prompt( return prompt_embeds - # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform + # Copied from diffusers.pipelines.deprecated.audioldm.pipeline_audioldm.AudioLDMPipeline.mel_spectrogram_to_waveform def mel_spectrogram_to_waveform(self, mel_spectrogram): if mel_spectrogram.dim() == 4: mel_spectrogram = mel_spectrogram.squeeze(1) @@ -312,7 +312,7 @@ def prepare_extra_step_kwargs(self, generator, eta): extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.audioldm.pipeline_audioldm.AudioLDMPipeline.check_inputs def check_inputs( self, prompt, @@ -371,7 +371,7 @@ def check_inputs( f" {negative_prompt_embeds.shape}." 
) - # Copied from diffusers.pipelines.audioldm.pipeline_audioldm.AudioLDMPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.audioldm.pipeline_audioldm.AudioLDMPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): shape = ( batch_size, diff --git a/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py index 6ea5198eac9b..cf189a1f18e2 100644 --- a/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py +++ b/src/diffusers/pipelines/deprecated/pia/pipeline_pia.py @@ -420,7 +420,7 @@ def encode_image(self, image, device, num_images_per_prompt, output_hidden_state return image_embeds, uncond_image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents @@ -560,7 +560,7 @@ def prepare_ip_adapter_image_embeds( return ip_adapter_image_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py index a823760368e5..b135d128b269 100644 --- a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py @@ -373,7 +373,7 @@ def encode_prompt( return prompt_embeds, negative_prompt_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents diff --git a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py index 8e2c6e572295..9af63e5044bd 100644 --- a/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py +++ b/src/diffusers/pipelines/deprecated/text_to_video_synthesis/pipeline_text_to_video_zero_sdxl.py @@ -51,32 +51,32 @@ logger = logging.get_logger(__name__) # pylint: disable=invalid-name -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_0 +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_0 def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_1 +# Copied from 
diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_1 def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_3 +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_3 def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_4 +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.rearrange_4 def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. @@ -136,7 +136,7 @@ def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_ma return hidden_states -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor2_0 +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor2_0 class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. @@ -226,7 +226,7 @@ class TextToVideoSDXLPipelineOutput(BaseOutput): images: list[PIL.Image.Image] | np.ndarray -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.coords_grid +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.coords_grid def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) @@ -234,7 +234,7 @@ def coords_grid(batch, ht, wd, device): return coords[None].repeat(batch, 1, 1, 1) -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.warp_single_latent +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.warp_single_latent def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow @@ -262,7 +262,7 @@ def warp_single_latent(latent, reference_flow): return warped -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field @@ -286,7 +286,7 @@ def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ return reference_flow -# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field_and_warp_latents +# Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.create_motion_field_and_warp_latents def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, 
latents): """ Creates translation motion and warps the latents accordingly @@ -820,7 +820,7 @@ def encode_prompt( return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoZeroPipeline.forward_loop + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoZeroPipeline.forward_loop def forward_loop(self, x_t0, t0, t1, generator): """ Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. diff --git a/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py index eb16217bc39f..3ea5ca75949c 100644 --- a/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py +++ b/src/diffusers/pipelines/deprecated/unclip/pipeline_unclip_image_variation.py @@ -114,7 +114,7 @@ def __init__( super_res_scheduler=super_res_scheduler, ) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py index 0e8b1539f633..b57fc732b5f5 100644 --- a/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen.py @@ -107,7 +107,7 @@ def __init__( ) self.register_to_config(latent_dim_scale=latent_dim_scale) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py index ffcc8fb2051d..6d37f2c9eefa 100644 --- a/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py +++ b/src/diffusers/pipelines/deprecated/wuerstchen/pipeline_wuerstchen_prior.py @@ -126,7 +126,7 @@ def __init__( latent_mean=latent_mean, latent_std=latent_std, resolution_multiple=resolution_multiple ) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py index d635057f2b05..62e2f12a7f61 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky.py @@ -119,7 +119,7 @@ def __init__( ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) - # Copied from 
diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py index 4dba85446db5..f74bf1e14900 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py @@ -284,7 +284,7 @@ def __init__( self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) self._warn_has_been_called = False - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py index 523fd010eb7f..935f339bfb24 100644 --- a/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py +++ b/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_prior.py @@ -284,7 +284,7 @@ def interpolate( return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py index 5129b3f548e8..5fa7ba31a3e8 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py @@ -103,7 +103,7 @@ def __init__( ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py index 01001e0c9eba..9f5340557125 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet.py @@ -145,7 +145,7 @@ def __init__( ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): 
if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py index 31bd88103a06..796ab94b33a6 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpainting.py @@ -275,7 +275,7 @@ def __init__( self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) self._warn_has_been_called = False - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py index 41f4474c3906..8095f79280d4 100644 --- a/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +++ b/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py @@ -240,7 +240,7 @@ def interpolate( return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=zero_image_emb) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/latte/pipeline_latte.py b/src/diffusers/pipelines/latte/pipeline_latte.py index eed7762cebf1..7bc7b4aa915e 100644 --- a/src/diffusers/pipelines/latte/pipeline_latte.py +++ b/src/diffusers/pipelines/latte/pipeline_latte.py @@ -561,7 +561,7 @@ def _clean_caption(self, caption): return caption.strip() - # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None ): diff --git a/src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py b/src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py index 62d1c912283f..ac13fe22723e 100644 --- a/src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py +++ b/src/diffusers/pipelines/pag/pipeline_pag_sd_animatediff.py @@ -452,7 +452,7 @@ def prepare_extra_step_kwargs(self, generator, eta): extra_step_kwargs["generator"] = generator return extra_step_kwargs - # Copied from diffusers.pipelines.pia.pipeline_pia.PIAPipeline.check_inputs + # Copied from diffusers.pipelines.deprecated.pia.pipeline_pia.PIAPipeline.check_inputs def check_inputs( self, prompt, diff --git a/src/diffusers/pipelines/shap_e/pipeline_shap_e.py b/src/diffusers/pipelines/shap_e/pipeline_shap_e.py index 44967dfb3349..eea83aff9e10 100644 --- a/src/diffusers/pipelines/shap_e/pipeline_shap_e.py +++ b/src/diffusers/pipelines/shap_e/pipeline_shap_e.py @@ -126,7 +126,7 @@ def __init__( shap_e_renderer=shap_e_renderer, ) - # Copied from 
diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py b/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py index 964db30e7de2..f59fd298c684 100644 --- a/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py +++ b/src/diffusers/pipelines/shap_e/pipeline_shap_e_img2img.py @@ -127,7 +127,7 @@ def __init__( shap_e_renderer=shap_e_renderer, ) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py index 39857ed230e6..7015e9727ea5 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py @@ -166,7 +166,7 @@ def __init__( self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder def _encode_prior_prompt( self, prompt, @@ -584,7 +584,7 @@ def check_inputs( f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." 
) - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents + # Copied from diffusers.pipelines.deprecated.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) From 69e7f1bc2430e3eac3a626d99457aee127da46a0 Mon Sep 17 00:00:00 2001 From: stevhliu Date: Tue, 17 Feb 2026 12:58:48 -0800 Subject: [PATCH 5/8] fix --- tests/pipelines/stable_cascade/test_stable_cascade_combined.py | 2 +- tests/pipelines/stable_cascade/test_stable_cascade_decoder.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pipelines/stable_cascade/test_stable_cascade_combined.py b/tests/pipelines/stable_cascade/test_stable_cascade_combined.py index afa0db39f3fa..d9a511ab199c 100644 --- a/tests/pipelines/stable_cascade/test_stable_cascade_combined.py +++ b/tests/pipelines/stable_cascade/test_stable_cascade_combined.py @@ -21,7 +21,7 @@ from diffusers import DDPMWuerstchenScheduler, StableCascadeCombinedPipeline from diffusers.models import StableCascadeUNet -from diffusers.pipelines.wuerstchen import PaellaVQModel +from diffusers.pipelines.deprecated.wuerstchen import PaellaVQModel from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py b/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py index 5b3acb8705b3..b92df4c5d268 100644 --- a/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py +++ b/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py @@ -22,7 +22,7 @@ from diffusers import DDPMWuerstchenScheduler, StableCascadeDecoderPipeline from diffusers.models import StableCascadeUNet -from diffusers.pipelines.wuerstchen import PaellaVQModel +from diffusers.pipelines.deprecated.wuerstchen import PaellaVQModel from diffusers.utils.torch_utils import randn_tensor from ...testing_utils import ( From c49c61d0147a6dd4147f7e3cee366caa03bc79d0 Mon Sep 17 00:00:00 2001 From: stevhliu Date: Wed, 18 Feb 2026 11:13:31 -0800 Subject: [PATCH 6/8] remove deprecated .md files --- docs/source/en/api/attnprocessor.md | 2 +- docs/source/en/api/pipelines/amused.md | 51 --- .../en/api/pipelines/attend_and_excite.md | 37 --- docs/source/en/api/pipelines/audioldm.md | 50 --- .../source/en/api/pipelines/blip_diffusion.md | 41 --- docs/source/en/api/pipelines/controlnetxs.md | 43 --- .../en/api/pipelines/controlnetxs_sdxl.md | 42 --- .../en/api/pipelines/dance_diffusion.md | 32 -- docs/source/en/api/pipelines/diffedit.md | 58 ---- docs/source/en/api/pipelines/i2vgenxl.md | 58 ---- docs/source/en/api/pipelines/musicldm.md | 52 --- .../en/api/pipelines/paint_by_example.md | 39 --- docs/source/en/api/pipelines/panorama.md | 54 ---- docs/source/en/api/pipelines/pia.md | 168 ---------- .../api/pipelines/self_attention_guidance.md | 35 -- .../pipelines/semantic_stable_diffusion.md | 35 -- .../api/pipelines/stable_diffusion/gligen.md | 59 ---- .../stable_diffusion/ldm3d_diffusion.md | 59 ---- .../stable_diffusion/stable_diffusion_safe.md | 61 ---- docs/source/en/api/pipelines/text_to_video.md | 191 ----------- .../en/api/pipelines/text_to_video_zero.md | 306 ------------------ docs/source/en/api/pipelines/unclip.md | 37 --- docs/source/en/api/pipelines/unidiffuser.md | 206 ------------ docs/source/en/api/pipelines/wuerstchen.md | 
170 ---------- src/diffusers/__init__.py | 16 + 25 files changed, 17 insertions(+), 1885 deletions(-) delete mode 100644 docs/source/en/api/pipelines/amused.md delete mode 100644 docs/source/en/api/pipelines/attend_and_excite.md delete mode 100644 docs/source/en/api/pipelines/audioldm.md delete mode 100644 docs/source/en/api/pipelines/blip_diffusion.md delete mode 100644 docs/source/en/api/pipelines/controlnetxs.md delete mode 100644 docs/source/en/api/pipelines/controlnetxs_sdxl.md delete mode 100644 docs/source/en/api/pipelines/dance_diffusion.md delete mode 100644 docs/source/en/api/pipelines/diffedit.md delete mode 100644 docs/source/en/api/pipelines/i2vgenxl.md delete mode 100644 docs/source/en/api/pipelines/musicldm.md delete mode 100644 docs/source/en/api/pipelines/paint_by_example.md delete mode 100644 docs/source/en/api/pipelines/panorama.md delete mode 100644 docs/source/en/api/pipelines/pia.md delete mode 100644 docs/source/en/api/pipelines/self_attention_guidance.md delete mode 100644 docs/source/en/api/pipelines/semantic_stable_diffusion.md delete mode 100644 docs/source/en/api/pipelines/stable_diffusion/gligen.md delete mode 100644 docs/source/en/api/pipelines/stable_diffusion/ldm3d_diffusion.md delete mode 100644 docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_safe.md delete mode 100644 docs/source/en/api/pipelines/text_to_video.md delete mode 100644 docs/source/en/api/pipelines/text_to_video_zero.md delete mode 100644 docs/source/en/api/pipelines/unclip.md delete mode 100644 docs/source/en/api/pipelines/unidiffuser.md delete mode 100644 docs/source/en/api/pipelines/wuerstchen.md diff --git a/docs/source/en/api/attnprocessor.md b/docs/source/en/api/attnprocessor.md index ed87cdf7d43c..7ab053f10756 100644 --- a/docs/source/en/api/attnprocessor.md +++ b/docs/source/en/api/attnprocessor.md @@ -46,7 +46,7 @@ An attention processor is a class for applying different types of attention mech ## CrossFrameAttnProcessor -[[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor +[[autodoc]] pipelines.deprecated.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor ## Custom Diffusion diff --git a/docs/source/en/api/pipelines/amused.md b/docs/source/en/api/pipelines/amused.md deleted file mode 100644 index ad292abca2cc..000000000000 --- a/docs/source/en/api/pipelines/amused.md +++ /dev/null @@ -1,51 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# aMUSEd - -aMUSEd was introduced in [aMUSEd: An Open MUSE Reproduction](https://huggingface.co/papers/2401.01808) by Suraj Patil, William Berman, Robin Rombach, and Patrick von Platen. - -Amused is a lightweight text to image model based off of the [MUSE](https://huggingface.co/papers/2301.00704) architecture. Amused is particularly useful in applications that require a lightweight and fast model such as generating many images quickly at once. - -Amused is a vqvae token based transformer that can generate an image in fewer forward passes than many diffusion models. In contrast with muse, it uses the smaller text encoder CLIP-L/14 instead of t5-xxl. Due to its small parameter count and few forward pass generation process, amused can generate many images quickly. This benefit is seen particularly at larger batch sizes. 
- -The abstract from the paper is: - -*We present aMUSEd, an open-source, lightweight masked image model (MIM) for text-to-image generation based on MUSE. With 10 percent of MUSE's parameters, aMUSEd is focused on fast image generation. We believe MIM is under-explored compared to latent diffusion, the prevailing approach for text-to-image generation. Compared to latent diffusion, MIM requires fewer inference steps and is more interpretable. Additionally, MIM can be fine-tuned to learn additional styles with only a single image. We hope to encourage further exploration of MIM by demonstrating its effectiveness on large-scale text-to-image generation and releasing reproducible training code. We also release checkpoints for two models which directly produce images at 256x256 and 512x512 resolutions.* - -| Model | Params | -|-------|--------| -| [amused-256](https://huggingface.co/amused/amused-256) | 603M | -| [amused-512](https://huggingface.co/amused/amused-512) | 608M | - -## AmusedPipeline - -[[autodoc]] AmusedPipeline - - __call__ - - all - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention - -[[autodoc]] AmusedImg2ImgPipeline - - __call__ - - all - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention - -[[autodoc]] AmusedInpaintPipeline - - __call__ - - all - - enable_xformers_memory_efficient_attention - - disable_xformers_memory_efficient_attention \ No newline at end of file diff --git a/docs/source/en/api/pipelines/attend_and_excite.md b/docs/source/en/api/pipelines/attend_and_excite.md deleted file mode 100644 index e7d1e1d2b87c..000000000000 --- a/docs/source/en/api/pipelines/attend_and_excite.md +++ /dev/null @@ -1,37 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Attend-and-Excite - -Attend-and-Excite for Stable Diffusion was proposed in [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://attendandexcite.github.io/Attend-and-Excite/) and provides textual attention control over image generation. - -The abstract from the paper is: - -*Recent text-to-image generative models have demonstrated an unparalleled ability to generate diverse and creative imagery guided by a target text prompt. While revolutionary, current state-of-the-art diffusion models may still fail in generating images that fully convey the semantics in the given text prompt. We analyze the publicly available Stable Diffusion model and assess the existence of catastrophic neglect, where the model fails to generate one or more of the subjects from the input prompt. Moreover, we find that in some cases the model also fails to correctly bind attributes (e.g., colors) to their corresponding subjects. To help mitigate these failure cases, we introduce the concept of Generative Semantic Nursing (GSN), where we seek to intervene in the generative process on the fly during inference time to improve the faithfulness of the generated images. Using an attention-based formulation of GSN, dubbed Attend-and-Excite, we guide the model to refine the cross-attention units to attend to all subject tokens in the text prompt and strengthen - or excite - their activations, encouraging the model to generate all subjects described in the text prompt. 
We compare our approach to alternative approaches and demonstrate that it conveys the desired concepts more faithfully across a range of text prompts.* - -You can find additional information about Attend-and-Excite on the [project page](https://attendandexcite.github.io/Attend-and-Excite/), the [original codebase](https://github.com/AttendAndExcite/Attend-and-Excite), or try it out in a [demo](https://huggingface.co/spaces/AttendAndExcite/Attend-and-Excite). - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## StableDiffusionAttendAndExcitePipeline - -[[autodoc]] StableDiffusionAttendAndExcitePipeline - - all - - __call__ - -## StableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/audioldm.md b/docs/source/en/api/pipelines/audioldm.md deleted file mode 100644 index c8073a14ef0a..000000000000 --- a/docs/source/en/api/pipelines/audioldm.md +++ /dev/null @@ -1,50 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# AudioLDM - -AudioLDM was proposed in [AudioLDM: Text-to-Audio Generation with Latent Diffusion Models](https://huggingface.co/papers/2301.12503) by Haohe Liu et al. Inspired by [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview), AudioLDM -is a text-to-audio _latent diffusion model (LDM)_ that learns continuous audio representations from [CLAP](https://huggingface.co/docs/transformers/main/model_doc/clap) -latents. AudioLDM takes a text prompt as input and predicts the corresponding audio. It can generate text-conditional -sound effects, human speech and music. - -The abstract from the paper is: - -*Text-to-audio (TTA) system has recently gained attention for its ability to synthesize general audio based on text descriptions. However, previous studies in TTA have limited generation quality with high computational costs. In this study, we propose AudioLDM, a TTA system that is built on a latent space to learn the continuous audio representations from contrastive language-audio pretraining (CLAP) latents. The pretrained CLAP models enable us to train LDMs with audio embedding while providing text embedding as a condition during sampling. By learning the latent representations of audio signals and their compositions without modeling the cross-modal relationship, AudioLDM is advantageous in both generation quality and computational efficiency. Trained on AudioCaps with a single GPU, AudioLDM achieves state-of-the-art TTA performance measured by both objective and subjective metrics (e.g., frechet distance). Moreover, AudioLDM is the first TTA system that enables various text-guided audio manipulations (e.g., style transfer) in a zero-shot fashion. Our implementation and demos are available at [this https URL](https://audioldm.github.io/).* - -The original codebase can be found at [haoheliu/AudioLDM](https://github.com/haoheliu/AudioLDM). 
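A minimal text-to-audio sketch with [`AudioLDMPipeline`] is shown below; the checkpoint name and parameter values are assumptions, and the tips that follow cover prompt and parameter choices in more detail:

```python
import torch
from scipy.io import wavfile
from diffusers import AudioLDMPipeline

# assumed checkpoint; other AudioLDM checkpoints follow the same pattern
pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm-s-full-v2", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "Techno music with a strong, upbeat tempo and high melodic riffs"
# num_inference_steps trades speed for quality; audio_length_in_s sets the clip length
audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0]

# AudioLDM generates waveforms at a 16 kHz sample rate
wavfile.write("techno.wav", rate=16000, data=audio)
```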
- -## Tips - -When constructing a prompt, keep in mind: - -* Descriptive prompt inputs work best; you can use adjectives to describe the sound (for example, "high quality" or "clear") and make the prompt context specific (for example, "water stream in a forest" instead of "stream"). -* It's best to use general terms like "cat" or "dog" instead of specific names or abstract objects the model may not be familiar with. - -During inference: - -* The _quality_ of the predicted audio sample can be controlled by the `num_inference_steps` argument; higher steps give higher quality audio at the expense of slower inference. -* The _length_ of the predicted audio sample can be controlled by varying the `audio_length_in_s` argument. - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## AudioLDMPipeline -[[autodoc]] AudioLDMPipeline - - all - - __call__ - -## AudioPipelineOutput -[[autodoc]] pipelines.AudioPipelineOutput diff --git a/docs/source/en/api/pipelines/blip_diffusion.md b/docs/source/en/api/pipelines/blip_diffusion.md deleted file mode 100644 index b9c6ed7b5fbf..000000000000 --- a/docs/source/en/api/pipelines/blip_diffusion.md +++ /dev/null @@ -1,41 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# BLIP-Diffusion - -BLIP-Diffusion was proposed in [BLIP-Diffusion: Pre-trained Subject Representation for Controllable Text-to-Image Generation and Editing](https://huggingface.co/papers/2305.14720). It enables zero-shot subject-driven generation and control-guided zero-shot generation. - - -The abstract from the paper is: - -*Subject-driven text-to-image generation models create novel renditions of an input subject based on text prompts. Existing models suffer from lengthy fine-tuning and difficulties preserving the subject fidelity. To overcome these limitations, we introduce BLIP-Diffusion, a new subject-driven image generation model that supports multimodal control which consumes inputs of subject images and text prompts. Unlike other subject-driven generation models, BLIP-Diffusion introduces a new multimodal encoder which is pre-trained to provide subject representation. We first pre-train the multimodal encoder following BLIP-2 to produce visual representation aligned with the text. Then we design a subject representation learning task which enables a diffusion model to leverage such visual representation and generates new subject renditions. Compared with previous methods such as DreamBooth, our model enables zero-shot subject-driven generation, and efficient fine-tuning for customized subject with up to 20x speedup. We also demonstrate that BLIP-Diffusion can be flexibly combined with existing techniques such as ControlNet and prompt-to-prompt to enable novel subject-driven generation and editing applications. Project page at [this https URL](https://dxli94.github.io/BLIP-Diffusion-website/).* - -The original codebase can be found at [salesforce/LAVIS](https://github.com/salesforce/LAVIS/tree/main/projects/blip-diffusion). 
You can find the official BLIP-Diffusion checkpoints under the [hf.co/SalesForce](https://hf.co/SalesForce) organization. - -`BlipDiffusionPipeline` and `BlipDiffusionControlNetPipeline` were contributed by [`ayushtues`](https://github.com/ayushtues/). - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - - -## BlipDiffusionPipeline -[[autodoc]] BlipDiffusionPipeline - - all - - __call__ - -## BlipDiffusionControlNetPipeline -[[autodoc]] BlipDiffusionControlNetPipeline - - all - - __call__ diff --git a/docs/source/en/api/pipelines/controlnetxs.md b/docs/source/en/api/pipelines/controlnetxs.md deleted file mode 100644 index d44fb0cf0fdf..000000000000 --- a/docs/source/en/api/pipelines/controlnetxs.md +++ /dev/null @@ -1,43 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# ControlNet-XS - -
-<!-- LoRA badge -->
- -ControlNet-XS was introduced in [ControlNet-XS](https://vislearn.github.io/ControlNet-XS/) by Denis Zavadski and Carsten Rother. It is based on the observation that the control model in the [original ControlNet](https://huggingface.co/papers/2302.05543) can be made much smaller and still produce good results. - -Like the original ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process. - -ControlNet-XS generates images with comparable quality to a regular ControlNet, but it is 20-25% faster ([see benchmark](https://github.com/UmerHA/controlnet-xs-benchmark/blob/main/Speed%20Benchmark.ipynb) with StableDiffusion-XL) and uses ~45% less memory. - -Here's the overview from the [project page](https://vislearn.github.io/ControlNet-XS/): - -*With increasing computing capabilities, current model architectures appear to follow the trend of simply upscaling all components without validating the necessity for doing so. In this project we investigate the size and architectural design of ControlNet [Zhang et al., 2023] for controlling the image generation process with stable diffusion-based models. We show that a new architecture with as little as 1% of the parameters of the base model achieves state-of-the art results, considerably better than ControlNet in terms of FID score. Hence we call it ControlNet-XS. We provide the code for controlling StableDiffusion-XL [Podell et al., 2023] (Model B, 48M Parameters) and StableDiffusion 2.1 [Rombach et al. 2022] (Model B, 14M Parameters), all under openrail license.* - -This model was contributed by [UmerHA](https://twitter.com/UmerHAdil). ❤️ - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## StableDiffusionControlNetXSPipeline -[[autodoc]] StableDiffusionControlNetXSPipeline - - all - - __call__ - -## StableDiffusionPipelineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/controlnetxs_sdxl.md b/docs/source/en/api/pipelines/controlnetxs_sdxl.md deleted file mode 100644 index 7ae0e2a2a178..000000000000 --- a/docs/source/en/api/pipelines/controlnetxs_sdxl.md +++ /dev/null @@ -1,42 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# ControlNet-XS with Stable Diffusion XL - -ControlNet-XS was introduced in [ControlNet-XS](https://vislearn.github.io/ControlNet-XS/) by Denis Zavadski and Carsten Rother. It is based on the observation that the control model in the [original ControlNet](https://huggingface.co/papers/2302.05543) can be made much smaller and still produce good results. - -Like the original ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. 
For example, if you provide a depth map, the ControlNet model generates an image that'll preserve the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process. - -ControlNet-XS generates images with comparable quality to a regular ControlNet, but it is 20-25% faster ([see benchmark](https://github.com/UmerHA/controlnet-xs-benchmark/blob/main/Speed%20Benchmark.ipynb)) and uses ~45% less memory. - -Here's the overview from the [project page](https://vislearn.github.io/ControlNet-XS/): - -*With increasing computing capabilities, current model architectures appear to follow the trend of simply upscaling all components without validating the necessity for doing so. In this project we investigate the size and architectural design of ControlNet [Zhang et al., 2023] for controlling the image generation process with stable diffusion-based models. We show that a new architecture with as little as 1% of the parameters of the base model achieves state-of-the art results, considerably better than ControlNet in terms of FID score. Hence we call it ControlNet-XS. We provide the code for controlling StableDiffusion-XL [Podell et al., 2023] (Model B, 48M Parameters) and StableDiffusion 2.1 [Rombach et al. 2022] (Model B, 14M Parameters), all under openrail license.* - -This model was contributed by [UmerHA](https://twitter.com/UmerHAdil). ❤️ - -> [!WARNING] -> 🧪 Many of the SDXL ControlNet checkpoints are experimental, and there is a lot of room for improvement. Feel free to open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose) and leave us feedback on how we can improve! - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## StableDiffusionXLControlNetXSPipeline -[[autodoc]] StableDiffusionXLControlNetXSPipeline - - all - - __call__ - -## StableDiffusionPipelineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/dance_diffusion.md b/docs/source/en/api/pipelines/dance_diffusion.md deleted file mode 100644 index 0434f6319592..000000000000 --- a/docs/source/en/api/pipelines/dance_diffusion.md +++ /dev/null @@ -1,32 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Dance Diffusion - -[Dance Diffusion](https://github.com/Harmonai-org/sample-generator) is by Zach Evans. - -Dance Diffusion is the first in a suite of generative audio tools for producers and musicians released by [Harmonai](https://github.com/Harmonai-org). - - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. 
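A minimal unconditional generation sketch with [`DanceDiffusionPipeline`]; the checkpoint name and clip length are assumptions:

```python
from scipy.io import wavfile
from diffusers import DanceDiffusionPipeline

# assumed checkpoint from the Harmonai org
pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda")

# unconditional generation: no prompt, just a target clip length in seconds
audio = pipe(audio_length_in_s=4.0).audios[0]

# audio has shape (channels, samples); the sample rate comes from the UNet config
wavfile.write("dance.wav", rate=pipe.unet.config.sample_rate, data=audio.transpose())
```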
- -## DanceDiffusionPipeline -[[autodoc]] DanceDiffusionPipeline - - all - - __call__ - -## AudioPipelineOutput -[[autodoc]] pipelines.AudioPipelineOutput diff --git a/docs/source/en/api/pipelines/diffedit.md b/docs/source/en/api/pipelines/diffedit.md deleted file mode 100644 index 670b7bb4fca0..000000000000 --- a/docs/source/en/api/pipelines/diffedit.md +++ /dev/null @@ -1,58 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# DiffEdit - -[DiffEdit: Diffusion-based semantic image editing with mask guidance](https://huggingface.co/papers/2210.11427) is by Guillaume Couairon, Jakob Verbeek, Holger Schwenk, and Matthieu Cord. - -The abstract from the paper is: - -*Image generation has recently seen tremendous advances, with diffusion models allowing to synthesize convincing images for a large variety of text prompts. In this article, we propose DiffEdit, a method to take advantage of text-conditioned diffusion models for the task of semantic image editing, where the goal is to edit an image based on a text query. Semantic image editing is an extension of image generation, with the additional constraint that the generated image should be as similar as possible to a given input image. Current editing methods based on diffusion models usually require to provide a mask, making the task much easier by treating it as a conditional inpainting task. In contrast, our main contribution is able to automatically generate a mask highlighting regions of the input image that need to be edited, by contrasting predictions of a diffusion model conditioned on different text prompts. Moreover, we rely on latent inference to preserve content in those regions of interest and show excellent synergies with mask-based diffusion. DiffEdit achieves state-of-the-art editing performance on ImageNet. In addition, we evaluate semantic image editing in more challenging settings, using images from the COCO dataset as well as text-based generated images.* - -The original codebase can be found at [Xiang-cd/DiffEdit-stable-diffusion](https://github.com/Xiang-cd/DiffEdit-stable-diffusion), and you can try it out in this [demo](https://blog.problemsolversguild.com/posts/2022-11-02-diffedit-implementation.html). - -This pipeline was contributed by [clarencechen](https://github.com/clarencechen). ❤️ - -## Tips - -* The pipeline can generate masks that can be fed into other inpainting pipelines. -* In order to generate an image using this pipeline, both an image mask (source and target prompts can be manually specified or generated, and passed to [`~StableDiffusionDiffEditPipeline.generate_mask`]) -and a set of partially inverted latents (generated using [`~StableDiffusionDiffEditPipeline.invert`]) _must_ be provided as arguments when calling the pipeline to generate the final edited image. -* The function [`~StableDiffusionDiffEditPipeline.generate_mask`] exposes two prompt arguments, `source_prompt` and `target_prompt` -that let you control the locations of the semantic edits in the final image to be generated. Let's say, -you wanted to translate from "cat" to "dog". In this case, the edit direction will be "cat -> dog". To reflect -this in the generated mask, you simply have to set the embeddings related to the phrases including "cat" to -`source_prompt` and "dog" to `target_prompt`. 
-* When generating partially inverted latents using `invert`, assign a caption or text embedding describing the -overall image to the `prompt` argument to help guide the inverse latent sampling process. In most cases, the -source concept is sufficiently descriptive to yield good results, but feel free to explore alternatives. -* When calling the pipeline to generate the final edited image, assign the source concept to `negative_prompt` -and the target concept to `prompt`. Taking the above example, you simply have to set the embeddings related to -the phrases including "cat" to `negative_prompt` and "dog" to `prompt`. -* If you wanted to reverse the direction in the example above, i.e., "dog -> cat", then it's recommended to: - * Swap the `source_prompt` and `target_prompt` in the arguments to `generate_mask`. - * Change the input prompt in [`~StableDiffusionDiffEditPipeline.invert`] to include "dog". - * Swap the `prompt` and `negative_prompt` in the arguments to call the pipeline to generate the final edited image. -* The source and target prompts, or their corresponding embeddings, can also be automatically generated. Please refer to the [DiffEdit](../../using-diffusers/diffedit) guide for more details. - -## StableDiffusionDiffEditPipeline -[[autodoc]] StableDiffusionDiffEditPipeline - - all - - generate_mask - - invert - - __call__ - -## StableDiffusionPipelineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/i2vgenxl.md b/docs/source/en/api/pipelines/i2vgenxl.md deleted file mode 100644 index 711a5625f99c..000000000000 --- a/docs/source/en/api/pipelines/i2vgenxl.md +++ /dev/null @@ -1,58 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# I2VGen-XL - -[I2VGen-XL: High-Quality Image-to-Video Synthesis via Cascaded Diffusion Models](https://hf.co/papers/2311.04145.pdf) by Shiwei Zhang, Jiayu Wang, Yingya Zhang, Kang Zhao, Hangjie Yuan, Zhiwu Qin, Xiang Wang, Deli Zhao, and Jingren Zhou. - -The abstract from the paper is: - -*Video synthesis has recently made remarkable strides benefiting from the rapid development of diffusion models. However, it still encounters challenges in terms of semantic accuracy, clarity and spatio-temporal continuity. They primarily arise from the scarcity of well-aligned text-video data and the complex inherent structure of videos, making it difficult for the model to simultaneously ensure semantic and qualitative excellence. In this report, we propose a cascaded I2VGen-XL approach that enhances model performance by decoupling these two factors and ensures the alignment of the input data by utilizing static images as a form of crucial guidance. I2VGen-XL consists of two stages: i) the base stage guarantees coherent semantics and preserves content from input images by using two hierarchical encoders, and ii) the refinement stage enhances the video's details by incorporating an additional brief text and improves the resolution to 1280×720. To improve the diversity, we collect around 35 million single-shot text-video pairs and 6 billion text-image pairs to optimize the model. By this means, I2VGen-XL can simultaneously enhance the semantic accuracy, continuity of details and clarity of generated videos. 
Through extensive experiments, we have investigated the underlying principles of I2VGen-XL and compared it with current top methods, which can demonstrate its effectiveness on diverse data. The source code and models will be publicly available at [this https URL](https://i2vgen-xl.github.io/).* - -The original codebase can be found [here](https://github.com/ali-vilab/i2vgen-xl/). The model checkpoints can be found [here](https://huggingface.co/ali-vilab/). - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. Also, to know more about reducing the memory usage of this pipeline, refer to the ["Reduce memory usage"] section [here](../../using-diffusers/svd#reduce-memory-usage). - -Sample output with I2VGenXL: - - - - - -
-<!-- sample output: "library" -->
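For reference, a minimal image-to-video sketch with [`I2VGenXLPipeline`]; the checkpoint, image path, and parameter values are assumptions:

```python
import torch
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif, load_image

# assumed checkpoint from the ali-vilab org
pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16)
pipeline.enable_model_cpu_offload()

image = load_image("path/to/your_image.png")  # hypothetical path; any RGB conditioning image works
prompt = "Papers were floating in the air on a table in the library"
negative_prompt = "Distorted, discontinuous, ugly, blurry, low resolution, motionless, static"

generator = torch.manual_seed(0)
frames = pipeline(
    prompt=prompt,
    image=image,
    num_inference_steps=50,  # fewer than 50 steps with the default DDIMScheduler tends to degrade quality
    negative_prompt=negative_prompt,
    guidance_scale=9.0,
    generator=generator,
).frames[0]
export_to_gif(frames, "i2v.gif")
```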
- -## Notes - -* I2VGenXL always uses a `clip_skip` value of 1. This means it leverages the penultimate layer representations from the text encoder of CLIP. -* It can generate videos of quality that is often on par with [Stable Video Diffusion](../../using-diffusers/svd) (SVD). -* Unlike SVD, it additionally accepts text prompts as inputs. -* It can generate higher resolution videos. -* When using the [`DDIMScheduler`] (which is default for this pipeline), less than 50 steps for inference leads to bad results. -* This implementation is 1-stage variant of I2VGenXL. The main figure in the [I2VGen-XL](https://huggingface.co/papers/2311.04145) paper shows a 2-stage variant, however, 1-stage variant works well. See [this discussion](https://github.com/huggingface/diffusers/discussions/7952) for more details. - -## I2VGenXLPipeline -[[autodoc]] I2VGenXLPipeline - - all - - __call__ - -## I2VGenXLPipelineOutput -[[autodoc]] pipelines.i2vgen_xl.pipeline_i2vgen_xl.I2VGenXLPipelineOutput \ No newline at end of file diff --git a/docs/source/en/api/pipelines/musicldm.md b/docs/source/en/api/pipelines/musicldm.md deleted file mode 100644 index 1a83e5932ed4..000000000000 --- a/docs/source/en/api/pipelines/musicldm.md +++ /dev/null @@ -1,52 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# MusicLDM - -MusicLDM was proposed in [MusicLDM: Enhancing Novelty in Text-to-Music Generation Using Beat-Synchronous Mixup Strategies](https://huggingface.co/papers/2308.01546) by Ke Chen, Yusong Wu, Haohe Liu, Marianna Nezhurina, Taylor Berg-Kirkpatrick, Shlomo Dubnov. -MusicLDM takes a text prompt as input and predicts the corresponding music sample. - -Inspired by [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview) and [AudioLDM](https://huggingface.co/docs/diffusers/api/pipelines/audioldm), -MusicLDM is a text-to-music _latent diffusion model (LDM)_ that learns continuous audio representations from [CLAP](https://huggingface.co/docs/transformers/main/model_doc/clap) -latents. - -MusicLDM is trained on a corpus of 466 hours of music data. Beat-synchronous data augmentation strategies are applied to the music samples, both in the time domain and in the latent space. Using beat-synchronous data augmentation strategies encourages the model to interpolate between the training samples, but stay within the domain of the training data. The result is generated music that is more diverse while staying faithful to the corresponding style. - -The abstract of the paper is the following: - -*Diffusion models have shown promising results in cross-modal generation tasks, including text-to-image and text-to-audio generation. However, generating music, as a special type of audio, presents unique challenges due to limited availability of music data and sensitive issues related to copyright and plagiarism. In this paper, to tackle these challenges, we first construct a state-of-the-art text-to-music model, MusicLDM, that adapts Stable Diffusion and AudioLDM architectures to the music domain. We achieve this by retraining the contrastive language-audio pretraining model (CLAP) and the Hifi-GAN vocoder, as components of MusicLDM, on a collection of music data samples. 
Then, to address the limitations of training data and to avoid plagiarism, we leverage a beat tracking model and propose two different mixup strategies for data augmentation: beat-synchronous audio mixup and beat-synchronous latent mixup, which recombine training audio directly or via a latent embeddings space, respectively. Such mixup strategies encourage the model to interpolate between musical training samples and generate new music within the convex hull of the training data, making the generated music more diverse while still staying faithful to the corresponding style. In addition to popular evaluation metrics, we design several new evaluation metrics based on CLAP score to demonstrate that our proposed MusicLDM and beat-synchronous mixup strategies improve both the quality and novelty of generated music, as well as the correspondence between input text and generated music.* - -This pipeline was contributed by [sanchit-gandhi](https://huggingface.co/sanchit-gandhi). - -## Tips - -When constructing a prompt, keep in mind: - -* Descriptive prompt inputs work best; use adjectives to describe the sound (for example, "high quality" or "clear") and make the prompt context specific where possible (e.g. "melodic techno with a fast beat and synths" works better than "techno"). -* Using a *negative prompt* can significantly improve the quality of the generated audio. Try using a negative prompt of "low quality, average quality". - -During inference: - -* The _quality_ of the generated audio sample can be controlled by the `num_inference_steps` argument; higher steps give higher quality audio at the expense of slower inference. -* Multiple waveforms can be generated in one go: set `num_waveforms_per_prompt` to a value greater than 1 to enable. Automatic scoring will be performed between the generated waveforms and prompt text, and the audios ranked from best to worst accordingly. -* The _length_ of the generated audio sample can be controlled by varying the `audio_length_in_s` argument. - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## MusicLDMPipeline -[[autodoc]] MusicLDMPipeline - - all - - __call__ diff --git a/docs/source/en/api/pipelines/paint_by_example.md b/docs/source/en/api/pipelines/paint_by_example.md deleted file mode 100644 index 02bf6db7265d..000000000000 --- a/docs/source/en/api/pipelines/paint_by_example.md +++ /dev/null @@ -1,39 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Paint by Example - -[Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://huggingface.co/papers/2211.13227) is by Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, Fang Wen. - -The abstract from the paper is: - -*Language-guided image editing has achieved great success recently. In this paper, for the first time, we investigate exemplar-guided image editing for more precise control. We achieve this goal by leveraging self-supervised training to disentangle and re-organize the source image and the exemplar. 
However, the naive approach will cause obvious fusing artifacts. We carefully analyze it and propose an information bottleneck and strong augmentations to avoid the trivial solution of directly copying and pasting the exemplar image. Meanwhile, to ensure the controllability of the editing process, we design an arbitrary shape mask for the exemplar image and leverage the classifier-free guidance to increase the similarity to the exemplar image. The whole framework involves a single forward of the diffusion model without any iterative optimization. We demonstrate that our method achieves an impressive performance and enables controllable editing on in-the-wild images with high fidelity.* - -The original codebase can be found at [Fantasy-Studio/Paint-by-Example](https://github.com/Fantasy-Studio/Paint-by-Example), and you can try it out in a [demo](https://huggingface.co/spaces/Fantasy-Studio/Paint-by-Example). - -## Tips - -Paint by Example is supported by the official [Fantasy-Studio/Paint-by-Example](https://huggingface.co/Fantasy-Studio/Paint-by-Example) checkpoint. The checkpoint is warm-started from [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) to inpaint partly masked images conditioned on example and reference images. - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## PaintByExamplePipeline -[[autodoc]] PaintByExamplePipeline - - all - - __call__ - -## StableDiffusionPipelineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/panorama.md b/docs/source/en/api/pipelines/panorama.md deleted file mode 100644 index b65e05dd0b51..000000000000 --- a/docs/source/en/api/pipelines/panorama.md +++ /dev/null @@ -1,54 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# MultiDiffusion - -
-<!-- LoRA badge -->
- -[MultiDiffusion: Fusing Diffusion Paths for Controlled Image Generation](https://huggingface.co/papers/2302.08113) is by Omer Bar-Tal, Lior Yariv, Yaron Lipman, and Tali Dekel. - -The abstract from the paper is: - -*Recent advances in text-to-image generation with diffusion models present transformative capabilities in image quality. However, user controllability of the generated image, and fast adaptation to new tasks still remains an open challenge, currently mostly addressed by costly and long re-training and fine-tuning or ad-hoc adaptations to specific image generation tasks. In this work, we present MultiDiffusion, a unified framework that enables versatile and controllable image generation, using a pre-trained text-to-image diffusion model, without any further training or finetuning. At the center of our approach is a new generation process, based on an optimization task that binds together multiple diffusion generation processes with a shared set of parameters or constraints. We show that MultiDiffusion can be readily applied to generate high quality and diverse images that adhere to user-provided controls, such as desired aspect ratio (e.g., panorama), and spatial guiding signals, ranging from tight segmentation masks to bounding boxes.* - -You can find additional information about MultiDiffusion on the [project page](https://multidiffusion.github.io/), [original codebase](https://github.com/omerbt/MultiDiffusion), and try it out in a [demo](https://huggingface.co/spaces/weizmannscience/MultiDiffusion). - -## Tips - -While calling [`StableDiffusionPanoramaPipeline`], it's possible to specify the `view_batch_size` parameter to be > 1. -For some GPUs with high performance, this can speedup the generation process and increase VRAM usage. - -To generate panorama-like images make sure you pass the width parameter accordingly. We recommend a width value of 2048 which is the default. - -Circular padding is applied to ensure there are no stitching artifacts when working with panoramas to ensure a seamless transition from the rightmost part to the leftmost part. By enabling circular padding (set `circular_padding=True`), the operation applies additional crops after the rightmost point of the image, allowing the model to "see” the transition from the rightmost part to the leftmost part. This helps maintain visual consistency in a 360-degree sense and creates a proper “panorama” that can be viewed using 360-degree panorama viewers. When decoding latents in Stable Diffusion, circular padding is applied to ensure that the decoded latents match in the RGB space. - -For example, without circular padding, there is a stitching artifact (default): -![img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/indoor_%20no_circular_padding.png) - -But with circular padding, the right and the left parts are matching (`circular_padding=True`): -![img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/indoor_%20circular_padding.png) - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. 
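The tips above map directly onto the pipeline call; a minimal sketch, assuming the `stabilityai/stable-diffusion-2-base` checkpoint and illustrative parameter values:

```python
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

# assumed base checkpoint; the pipeline wraps a regular Stable Diffusion model
model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe(
    "a photo of the dolomites",
    width=2048,             # panoramas need a wide canvas; 2048 is the recommended default
    view_batch_size=4,      # process several views per forward pass on capable GPUs
    circular_padding=True,  # avoids a visible seam between the left and right edges
).images[0]
image.save("panorama.png")
```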
- -## StableDiffusionPanoramaPipeline -[[autodoc]] StableDiffusionPanoramaPipeline - - __call__ - - all - -## StableDiffusionPipelineOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/pia.md b/docs/source/en/api/pipelines/pia.md deleted file mode 100644 index eebfa4d4f8a6..000000000000 --- a/docs/source/en/api/pipelines/pia.md +++ /dev/null @@ -1,168 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Image-to-Video Generation with PIA (Personalized Image Animator) - -
-<!-- LoRA badge -->
- -## Overview - -[PIA: Your Personalized Image Animator via Plug-and-Play Modules in Text-to-Image Models](https://huggingface.co/papers/2312.13964) by Yiming Zhang, Zhening Xing, Yanhong Zeng, Youqing Fang, Kai Chen - -Recent advancements in personalized text-to-image (T2I) models have revolutionized content creation, empowering non-experts to generate stunning images with unique styles. While promising, adding realistic motions into these personalized images by text poses significant challenges in preserving distinct styles, high-fidelity details, and achieving motion controllability by text. In this paper, we present PIA, a Personalized Image Animator that excels in aligning with condition images, achieving motion controllability by text, and the compatibility with various personalized T2I models without specific tuning. To achieve these goals, PIA builds upon a base T2I model with well-trained temporal alignment layers, allowing for the seamless transformation of any personalized T2I model into an image animation model. A key component of PIA is the introduction of the condition module, which utilizes the condition frame and inter-frame affinity as input to transfer appearance information guided by the affinity hint for individual frame synthesis in the latent space. This design mitigates the challenges of appearance-related image alignment within and allows for a stronger focus on aligning with motion-related guidance. - -[Project page](https://pi-animator.github.io/) - -## Available Pipelines - -| Pipeline | Tasks | Demo -|---|---|:---:| -| [PIAPipeline](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pia/pipeline_pia.py) | *Image-to-Video Generation with PIA* | - -## Available checkpoints - -Motion Adapter checkpoints for PIA can be found under the [OpenMMLab org](https://huggingface.co/openmmlab/PIA-condition-adapter). These checkpoints are meant to work with any model based on Stable Diffusion 1.5 - -## Usage example - -PIA works with a MotionAdapter checkpoint and a Stable Diffusion 1.5 model checkpoint. The MotionAdapter is a collection of Motion Modules that are responsible for adding coherent motion across image frames. These modules are applied after the Resnet and Attention blocks in the Stable Diffusion UNet. In addition to the motion modules, PIA also replaces the input convolution layer of the SD 1.5 UNet model with a 9 channel input convolution layer. - -The following example demonstrates how to use PIA to generate a video from a single image. 
- -```python -import torch -from diffusers import ( - EulerDiscreteScheduler, - MotionAdapter, - PIAPipeline, -) -from diffusers.utils import export_to_gif, load_image - -adapter = MotionAdapter.from_pretrained("openmmlab/PIA-condition-adapter") -pipe = PIAPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", motion_adapter=adapter, torch_dtype=torch.float16) - -pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) -pipe.enable_model_cpu_offload() -pipe.enable_vae_slicing() - -image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true" -) -image = image.resize((512, 512)) -prompt = "cat in a field" -negative_prompt = "wrong white balance, dark, sketches,worst quality,low quality" - -generator = torch.Generator("cpu").manual_seed(0) -output = pipe(image=image, prompt=prompt, generator=generator) -frames = output.frames[0] -export_to_gif(frames, "pia-animation.gif") -``` - -Here are some sample outputs: - - - - - -
-<!-- sample output: "cat in a field" -->
- - -> [!TIP] -> If you plan on using a scheduler that can clip samples, make sure to disable it by setting `clip_sample=False` in the scheduler as this can also have an adverse effect on generated samples. Additionally, the PIA checkpoints can be sensitive to the beta schedule of the scheduler. We recommend setting this to `linear`. - -## Using FreeInit - -[FreeInit: Bridging Initialization Gap in Video Diffusion Models](https://huggingface.co/papers/2312.07537) by Tianxing Wu, Chenyang Si, Yuming Jiang, Ziqi Huang, Ziwei Liu. - -FreeInit is an effective method that improves temporal consistency and overall quality of videos generated using video-diffusion-models without any addition training. It can be applied to PIA, AnimateDiff, ModelScope, VideoCrafter and various other video generation models seamlessly at inference time, and works by iteratively refining the latent-initialization noise. More details can be found it the paper. - -The following example demonstrates the usage of FreeInit. - -```python -import torch -from diffusers import ( - DDIMScheduler, - MotionAdapter, - PIAPipeline, -) -from diffusers.utils import export_to_gif, load_image - -adapter = MotionAdapter.from_pretrained("openmmlab/PIA-condition-adapter") -pipe = PIAPipeline.from_pretrained("SG161222/Realistic_Vision_V6.0_B1_noVAE", motion_adapter=adapter) - -# enable FreeInit -# Refer to the enable_free_init documentation for a full list of configurable parameters -pipe.enable_free_init(method="butterworth", use_fast_sampling=True) - -# Memory saving options -pipe.enable_model_cpu_offload() -pipe.enable_vae_slicing() - -pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) -image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true" -) -image = image.resize((512, 512)) -prompt = "cat in a field" -negative_prompt = "wrong white balance, dark, sketches,worst quality,low quality" - -generator = torch.Generator("cpu").manual_seed(0) - -output = pipe(image=image, prompt=prompt, generator=generator) -frames = output.frames[0] -export_to_gif(frames, "pia-freeinit-animation.gif") -``` - - - - - -
-<!-- sample output with FreeInit: "cat in a field" -->
- - -> [!WARNING] -> FreeInit is not really free - the improved quality comes at the cost of extra computation. It requires sampling a few extra times depending on the `num_iters` parameter that is set when enabling it. Setting the `use_fast_sampling` parameter to `True` can improve the overall performance (at the cost of lower quality compared to when `use_fast_sampling=False` but still better results than vanilla video generation models). - -## PIAPipeline - -[[autodoc]] PIAPipeline - - all - - __call__ - - enable_freeu - - disable_freeu - - enable_free_init - - disable_free_init - - enable_vae_slicing - - disable_vae_slicing - - enable_vae_tiling - - disable_vae_tiling - -## PIAPipelineOutput - -[[autodoc]] pipelines.pia.PIAPipelineOutput \ No newline at end of file diff --git a/docs/source/en/api/pipelines/self_attention_guidance.md b/docs/source/en/api/pipelines/self_attention_guidance.md deleted file mode 100644 index 8d411598ae6d..000000000000 --- a/docs/source/en/api/pipelines/self_attention_guidance.md +++ /dev/null @@ -1,35 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Self-Attention Guidance - -[Improving Sample Quality of Diffusion Models Using Self-Attention Guidance](https://huggingface.co/papers/2210.00939) is by Susung Hong et al. - -The abstract from the paper is: - -*Denoising diffusion models (DDMs) have attracted attention for their exceptional generation quality and diversity. This success is largely attributed to the use of class- or text-conditional diffusion guidance methods, such as classifier and classifier-free guidance. In this paper, we present a more comprehensive perspective that goes beyond the traditional guidance methods. From this generalized perspective, we introduce novel condition- and training-free strategies to enhance the quality of generated images. As a simple solution, blur guidance improves the suitability of intermediate samples for their fine-scale information and structures, enabling diffusion models to generate higher quality samples with a moderate guidance scale. Improving upon this, Self-Attention Guidance (SAG) uses the intermediate self-attention maps of diffusion models to enhance their stability and efficacy. Specifically, SAG adversarially blurs only the regions that diffusion models attend to at each iteration and guides them accordingly. Our experimental results show that our SAG improves the performance of various diffusion models, including ADM, IDDPM, Stable Diffusion, and DiT. Moreover, combining SAG with conventional guidance methods leads to further improvement.* - -You can find additional information about Self-Attention Guidance on the [project page](https://ku-cvlab.github.io/Self-Attention-Guidance), [original codebase](https://github.com/KU-CVLAB/Self-Attention-Guidance), and try it out in a [demo](https://huggingface.co/spaces/susunghong/Self-Attention-Guidance) or [notebook](https://colab.research.google.com/github/SusungHong/Self-Attention-Guidance/blob/main/SAG_Stable.ipynb). 
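A minimal sketch of [`StableDiffusionSAGPipeline`]; the base checkpoint and the `sag_scale` value are assumptions:

```python
import torch
from diffusers import StableDiffusionSAGPipeline

# assumed base checkpoint; other Stable Diffusion 1.x models behave similarly
pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# sag_scale controls how strongly the self-attention guidance is applied;
# 0.0 falls back to plain classifier-free guidance
image = pipe("a photo of an astronaut riding a horse on mars", sag_scale=0.75).images[0]
image.save("sag.png")
```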
- -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## StableDiffusionSAGPipeline -[[autodoc]] StableDiffusionSAGPipeline - - __call__ - - all - -## StableDiffusionOutput -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/semantic_stable_diffusion.md b/docs/source/en/api/pipelines/semantic_stable_diffusion.md deleted file mode 100644 index dda428e80f8f..000000000000 --- a/docs/source/en/api/pipelines/semantic_stable_diffusion.md +++ /dev/null @@ -1,35 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Semantic Guidance - -Semantic Guidance for Diffusion Models was proposed in [SEGA: Instructing Text-to-Image Models using Semantic Guidance](https://huggingface.co/papers/2301.12247) and provides strong semantic control over image generation. -Small changes to the text prompt usually result in entirely different output images. However, with SEGA a variety of changes to the image are enabled that can be controlled easily and intuitively, while staying true to the original image composition. - -The abstract from the paper is: - -*Text-to-image diffusion models have recently received a lot of interest for their astonishing ability to produce high-fidelity images from text only. However, achieving one-shot generation that aligns with the user's intent is nearly impossible, yet small changes to the input prompt often result in very different images. This leaves the user with little semantic control. To put the user in control, we show how to interact with the diffusion process to flexibly steer it along semantic directions. This semantic guidance (SEGA) generalizes to any generative architecture using classifier-free guidance. More importantly, it allows for subtle and extensive edits, changes in composition and style, as well as optimizing the overall artistic conception. We demonstrate SEGA's effectiveness on both latent and pixel-based diffusion models such as Stable Diffusion, Paella, and DeepFloyd-IF using a variety of tasks, thus providing strong evidence for its versatility, flexibility, and improvements over existing methods.* - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. 
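A minimal sketch of [`SemanticStableDiffusionPipeline`]; the base checkpoint and the editing parameter values are assumptions chosen for illustration:

```python
import torch
from diffusers import SemanticStableDiffusionPipeline

# assumed base checkpoint
pipe = SemanticStableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

out = pipe(
    prompt="a photo of the face of a woman",
    guidance_scale=7.0,
    editing_prompt=["smiling, smile"],  # semantic direction to steer toward
    reverse_editing_direction=[False],  # set True to steer away from the concept instead
    edit_warmup_steps=[10],             # diffusion steps before the edit guidance kicks in
    edit_guidance_scale=[6.0],
    edit_threshold=[0.99],
    edit_momentum_scale=0.5,
    edit_mom_beta=0.6,
)
out.images[0].save("sega.png")
```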
- -## SemanticStableDiffusionPipeline -[[autodoc]] SemanticStableDiffusionPipeline - - all - - __call__ - -## SemanticStableDiffusionPipelineOutput -[[autodoc]] pipelines.semantic_stable_diffusion.pipeline_output.SemanticStableDiffusionPipelineOutput - - all diff --git a/docs/source/en/api/pipelines/stable_diffusion/gligen.md b/docs/source/en/api/pipelines/stable_diffusion/gligen.md deleted file mode 100644 index c8297fb7b3de..000000000000 --- a/docs/source/en/api/pipelines/stable_diffusion/gligen.md +++ /dev/null @@ -1,59 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# GLIGEN (Grounded Language-to-Image Generation) - -The GLIGEN model was created by researchers and engineers from [University of Wisconsin-Madison, Columbia University, and Microsoft](https://github.com/gligen/GLIGEN). The [`StableDiffusionGLIGENPipeline`] and [`StableDiffusionGLIGENTextImagePipeline`] can generate photorealistic images conditioned on grounding inputs. Along with text and bounding boxes with [`StableDiffusionGLIGENPipeline`], if input images are given, [`StableDiffusionGLIGENTextImagePipeline`] can insert objects described by text at the region defined by bounding boxes. Otherwise, it'll generate an image described by the caption/prompt and insert objects described by text at the region defined by bounding boxes. It's trained on COCO2014D and COCO2014CD datasets, and the model uses a frozen CLIP ViT-L/14 text encoder to condition itself on grounding inputs. - -The abstract from the [paper](https://huggingface.co/papers/2301.07093) is: - -*Large-scale text-to-image diffusion models have made amazing advances. However, the status quo is to use text input alone, which can impede controllability. In this work, we propose GLIGEN, Grounded-Language-to-Image Generation, a novel approach that builds upon and extends the functionality of existing pre-trained text-to-image diffusion models by enabling them to also be conditioned on grounding inputs. To preserve the vast concept knowledge of the pre-trained model, we freeze all of its weights and inject the grounding information into new trainable layers via a gated mechanism. Our model achieves open-world grounded text2img generation with caption and bounding box condition inputs, and the grounding ability generalizes well to novel spatial configurations and concepts. GLIGEN’s zeroshot performance on COCO and LVIS outperforms existing supervised layout-to-image baselines by a large margin.* - -> [!TIP] -> Make sure to check out the Stable Diffusion [Tips](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality and how to reuse pipeline components efficiently! -> -> If you want to use one of the official checkpoints for a task, explore the [gligen](https://huggingface.co/gligen) Hub organizations! - -[`StableDiffusionGLIGENPipeline`] was contributed by [Nikhil Gajendrakumar](https://github.com/nikhil-masterful) and [`StableDiffusionGLIGENTextImagePipeline`] was contributed by [Nguyễn Công Tú Anh](https://github.com/tuanh123789). 
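A minimal grounded text-to-image sketch with [`StableDiffusionGLIGENPipeline`]; the checkpoint, phrases, and box coordinates are assumptions:

```python
import torch
from diffusers import StableDiffusionGLIGENPipeline

# assumed text-box grounded generation checkpoint
pipe = StableDiffusionGLIGENPipeline.from_pretrained(
    "masterful/gligen-1-4-generation-text-box", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
image = pipe(
    prompt=prompt,
    gligen_phrases=["a waterfall", "a modern high speed train"],
    gligen_boxes=[[0.14, 0.21, 0.43, 0.71], [0.50, 0.44, 0.85, 0.73]],  # normalized (x0, y0, x1, y1) boxes
    gligen_scheduled_sampling_beta=1,  # fraction of steps that apply the grounding tokens
    num_inference_steps=50,
).images[0]
image.save("gligen.png")
```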
- -## StableDiffusionGLIGENPipeline - -[[autodoc]] StableDiffusionGLIGENPipeline - - all - - __call__ - - enable_vae_slicing - - disable_vae_slicing - - enable_vae_tiling - - disable_vae_tiling - - enable_model_cpu_offload - - prepare_latents - - enable_fuser - -## StableDiffusionGLIGENTextImagePipeline - -[[autodoc]] StableDiffusionGLIGENTextImagePipeline - - all - - __call__ - - enable_vae_slicing - - disable_vae_slicing - - enable_vae_tiling - - disable_vae_tiling - - enable_model_cpu_offload - - prepare_latents - - enable_fuser - -## StableDiffusionPipelineOutput - -[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput diff --git a/docs/source/en/api/pipelines/stable_diffusion/ldm3d_diffusion.md b/docs/source/en/api/pipelines/stable_diffusion/ldm3d_diffusion.md deleted file mode 100644 index 15f9f1db851f..000000000000 --- a/docs/source/en/api/pipelines/stable_diffusion/ldm3d_diffusion.md +++ /dev/null @@ -1,59 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Text-to-(RGB, depth) - -
-<!-- LoRA badge -->
- -LDM3D was proposed in [LDM3D: Latent Diffusion Model for 3D](https://huggingface.co/papers/2305.10853) by Gabriela Ben Melech Stan, Diana Wofk, Scottie Fox, Alex Redden, Will Saxton, Jean Yu, Estelle Aflalo, Shao-Yen Tseng, Fabio Nonato, Matthias Muller, and Vasudev Lal. LDM3D generates an image and a depth map from a given text prompt unlike the existing text-to-image diffusion models such as [Stable Diffusion](./overview) which only generates an image. With almost the same number of parameters, LDM3D achieves to create a latent space that can compress both the RGB images and the depth maps. - -Two checkpoints are available for use: -- [ldm3d-original](https://huggingface.co/Intel/ldm3d). The original checkpoint used in the [paper](https://huggingface.co/papers/2305.10853) -- [ldm3d-4c](https://huggingface.co/Intel/ldm3d-4c). The new version of LDM3D using 4 channels inputs instead of 6-channels inputs and finetuned on higher resolution images. - - -The abstract from the paper is: - -*This research paper proposes a Latent Diffusion Model for 3D (LDM3D) that generates both image and depth map data from a given text prompt, allowing users to generate RGBD images from text prompts. The LDM3D model is fine-tuned on a dataset of tuples containing an RGB image, depth map and caption, and validated through extensive experiments. We also develop an application called DepthFusion, which uses the generated RGB images and depth maps to create immersive and interactive 360-degree-view experiences using TouchDesigner. This technology has the potential to transform a wide range of industries, from entertainment and gaming to architecture and design. Overall, this paper presents a significant contribution to the field of generative AI and computer vision, and showcases the potential of LDM3D and DepthFusion to revolutionize content creation and digital experiences. A short video summarizing the approach can be found at [this url](https://t.ly/tdi2).* - -> [!TIP] -> Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! - -## StableDiffusionLDM3DPipeline - -[[autodoc]] pipelines.stable_diffusion_ldm3d.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline - - all - - __call__ - - -## LDM3DPipelineOutput - -[[autodoc]] pipelines.stable_diffusion_ldm3d.pipeline_stable_diffusion_ldm3d.LDM3DPipelineOutput - - all - - __call__ - -# Upscaler - -[LDM3D-VR](https://huggingface.co/papers/2311.03226) is an extended version of LDM3D. - -The abstract from the paper is: -*Latent diffusion models have proven to be state-of-the-art in the creation and manipulation of visual outputs. However, as far as we know, the generation of depth maps jointly with RGB is still limited. We introduce LDM3D-VR, a suite of diffusion models targeting virtual reality development that includes LDM3D-pano and LDM3D-SR. These models enable the generation of panoramic RGBD based on textual prompts and the upscaling of low-resolution inputs to high-resolution RGBD, respectively. Our models are fine-tuned from existing pretrained models on datasets containing panoramic/high-resolution RGB images, depth maps and captions. Both models are evaluated in comparison to existing related methods* - -Two checkpoints are available for use: -- [ldm3d-pano](https://huggingface.co/Intel/ldm3d-pano). 
This checkpoint enables the generation of panoramic images and requires the StableDiffusionLDM3DPipeline to be used. -- [ldm3d-sr](https://huggingface.co/Intel/ldm3d-sr). This checkpoint enables the upscaling of RGB and depth images. It can be used in a cascade after the original LDM3D pipeline using the StableDiffusionUpscaleLDM3DPipeline from the community pipelines. - diff --git a/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_safe.md b/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_safe.md deleted file mode 100644 index 151b0b8a6507..000000000000 --- a/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_safe.md +++ /dev/null @@ -1,61 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Safe Stable Diffusion - -Safe Stable Diffusion was proposed in [Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models](https://huggingface.co/papers/2211.05105) and mitigates inappropriate degeneration in Stable Diffusion models, which are trained on unfiltered, web-crawled datasets. For instance, Stable Diffusion may unexpectedly generate nudity, violence, images depicting self-harm, and otherwise offensive content. Safe Stable Diffusion is an extension of Stable Diffusion that drastically reduces this type of content. - -The abstract from the paper is: - -*Text-conditioned image generation models have recently achieved astonishing results in image quality and text alignment and are consequently employed in a fast-growing number of applications. Since they are highly data-driven, relying on billion-sized datasets randomly scraped from the internet, they also suffer, as we demonstrate, from degenerated and biased human behavior. In turn, they may even reinforce such biases. To help combat these undesired side effects, we present safe latent diffusion (SLD). Specifically, to measure the inappropriate degeneration due to unfiltered and imbalanced training sets, we establish a novel image generation test bed-inappropriate image prompts (I2P)-containing dedicated, real-world image-to-text prompts covering concepts such as nudity and violence. As our exhaustive empirical evaluation demonstrates, the introduced SLD removes and suppresses inappropriate image parts during the diffusion process, with no additional training required and no adverse effect on overall image quality or text alignment.* - -## Tips - -Use the `safety_concept` property of [`StableDiffusionPipelineSafe`] to check and edit the current safety concept: - -```python ->>> from diffusers import StableDiffusionPipelineSafe - ->>> pipeline = StableDiffusionPipelineSafe.from_pretrained("AIML-TUDA/stable-diffusion-safe") ->>> pipeline.safety_concept -'an image showing hate, harassment, violence, suffering, humiliation, harm, suicide, sexual, nudity, bodily fluids, blood, obscene gestures, illegal activity, drug use, theft, vandalism, weapons, child abuse, brutality, cruelty' -``` -For each image generation the active concept is also contained in [`StableDiffusionSafePipelineOutput`].
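The sketch below shows both halves of that workflow: overriding the `safety_concept` property before generation and reading the concept back from the output afterwards. It assumes the property is writable and that the output exposes an `applied_safety_concept` field; treat those names as assumptions rather than guarantees.

```python
>>> from diffusers import StableDiffusionPipelineSafe

>>> pipeline = StableDiffusionPipelineSafe.from_pretrained("AIML-TUDA/stable-diffusion-safe")

>>> # Override the active safety concept before generating (assumes the property has a setter)
>>> pipeline.safety_concept = "violence, weapons, blood"

>>> # After a generation, the concept that was applied is carried on the output object
>>> out = pipeline(prompt="portrait of an astronaut in a flower field")
>>> print(out.applied_safety_concept)  # assumed field name on StableDiffusionSafePipelineOutput
```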
- -There are 4 configurations (`SafetyConfig.WEAK`, `SafetyConfig.MEDIUM`, `SafetyConfig.STRONG`, and `SafetyConfig.MAX`) that can be applied: - -```python ->>> from diffusers import StableDiffusionPipelineSafe ->>> from diffusers.pipelines.stable_diffusion_safe import SafetyConfig - ->>> pipeline = StableDiffusionPipelineSafe.from_pretrained("AIML-TUDA/stable-diffusion-safe") ->>> prompt = "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker" ->>> out = pipeline(prompt=prompt, **SafetyConfig.MAX) -``` - -> [!TIP] -> Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! - -## StableDiffusionPipelineSafe - -[[autodoc]] StableDiffusionPipelineSafe - - all - - __call__ - -## StableDiffusionSafePipelineOutput - -[[autodoc]] pipelines.stable_diffusion_safe.StableDiffusionSafePipelineOutput - - all - - __call__ diff --git a/docs/source/en/api/pipelines/text_to_video.md b/docs/source/en/api/pipelines/text_to_video.md deleted file mode 100644 index d9f6d8e722ac..000000000000 --- a/docs/source/en/api/pipelines/text_to_video.md +++ /dev/null @@ -1,191 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Text-to-video - -
- -[ModelScope Text-to-Video Technical Report](https://huggingface.co/papers/2308.06571) is by Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, Shiwei Zhang. - -The abstract from the paper is: - -*This paper introduces ModelScopeT2V, a text-to-video synthesis model that evolves from a text-to-image synthesis model (i.e., Stable Diffusion). ModelScopeT2V incorporates spatio-temporal blocks to ensure consistent frame generation and smooth movement transitions. The model could adapt to varying frame numbers during training and inference, rendering it suitable for both image-text and video-text datasets. ModelScopeT2V brings together three components (i.e., VQGAN, a text encoder, and a denoising UNet), totally comprising 1.7 billion parameters, in which 0.5 billion parameters are dedicated to temporal capabilities. The model demonstrates superior performance over state-of-the-art methods across three evaluation metrics. The code and an online demo are available at https://modelscope.cn/models/damo/text-to-video-synthesis/summary.* - -You can find additional information about Text-to-Video on the [project page](https://modelscope.cn/models/damo/text-to-video-synthesis/summary), [original codebase](https://github.com/modelscope/modelscope/), and try it out in a [demo](https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis). Official checkpoints can be found at [damo-vilab](https://huggingface.co/damo-vilab) and [cerspense](https://huggingface.co/cerspense). - -## Usage example - -### `text-to-video-ms-1.7b` - -Let's start by generating a short video with the default length of 16 frames (2s at 8 fps): - -```python -import torch -from diffusers import DiffusionPipeline -from diffusers.utils import export_to_video - -pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16") -pipe = pipe.to("cuda") - -prompt = "Spiderman is surfing" -video_frames = pipe(prompt).frames[0] -video_path = export_to_video(video_frames) -video_path -``` - -Diffusers supports different optimization techniques to improve the latency -and memory footprint of a pipeline. Since videos are often more memory-heavy than images, -we can enable CPU offloading and VAE slicing to keep the memory footprint at bay. - -Let's generate a video of 8 seconds (64 frames) on the same GPU using CPU offloading and VAE slicing: - -```python -import torch -from diffusers import DiffusionPipeline -from diffusers.utils import export_to_video - -pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16") -pipe.enable_model_cpu_offload() - -# memory optimization -pipe.enable_vae_slicing() - -prompt = "Darth Vader surfing a wave" -video_frames = pipe(prompt, num_frames=64).frames[0] -video_path = export_to_video(video_frames) -video_path -``` - -It just takes **7 GBs of GPU memory** to generate the 64 video frames using PyTorch 2.0, "fp16" precision and the techniques mentioned above. 
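The 7 GB figure will vary with your GPU, PyTorch version, and whether offloading is enabled. If you want to check the footprint on your own hardware, here is a quick sketch using PyTorch's allocator statistics, reusing `pipe` and `prompt` from the snippet above (it only counts memory allocated through PyTorch):

```python
import torch

# Reset the allocator statistics, run the 64-frame generation, then read back the peak usage
torch.cuda.reset_peak_memory_stats()
video_frames = pipe(prompt, num_frames=64).frames[0]
print(f"Peak GPU memory: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GiB")
```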
- -We can also use a different scheduler easily, using the same method we'd use for Stable Diffusion: - -```python -import torch -from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler -from diffusers.utils import export_to_video - -pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16") -pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) -pipe.enable_model_cpu_offload() - -prompt = "Spiderman is surfing" -video_frames = pipe(prompt, num_inference_steps=25).frames[0] -video_path = export_to_video(video_frames) -video_path -``` - -Here are some sample outputs: - - - - - - -
- *Sample output videos omitted; prompts shown: "An astronaut riding a horse." and "Darth vader surfing in waves."* -
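Outputs like these are stochastic. If you need reproducible clips for side-by-side comparisons, you can pass a seeded generator to the pipeline call; a small sketch reusing `pipe` from the snippets above (the `generator` argument is the standard Diffusers mechanism for fixing the initial latents):

```python
import torch

# Fixing the seed makes reruns produce the same clip for the same settings
generator = torch.Generator(device="cuda").manual_seed(0)
video_frames = pipe(
    "Darth Vader surfing a wave",
    num_inference_steps=25,
    generator=generator,
).frames[0]
```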
- -### `cerspense/zeroscope_v2_576w` & `cerspense/zeroscope_v2_XL` - -Zeroscope are watermark-free model and have been trained on specific sizes such as `576x320` and `1024x576`. -One should first generate a video using the lower resolution checkpoint [`cerspense/zeroscope_v2_576w`](https://huggingface.co/cerspense/zeroscope_v2_576w) with [`TextToVideoSDPipeline`], -which can then be upscaled using [`VideoToVideoSDPipeline`] and [`cerspense/zeroscope_v2_XL`](https://huggingface.co/cerspense/zeroscope_v2_XL). - - -```py -import torch -from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler -from diffusers.utils import export_to_video -from PIL import Image - -pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) -pipe.enable_model_cpu_offload() - -# memory optimization -pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) -pipe.enable_vae_slicing() - -prompt = "Darth Vader surfing a wave" -video_frames = pipe(prompt, num_frames=24).frames[0] -video_path = export_to_video(video_frames) -video_path -``` - -Now the video can be upscaled: - -```py -pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16) -pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) -pipe.enable_model_cpu_offload() - -# memory optimization -pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) -pipe.enable_vae_slicing() - -video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames] - -video_frames = pipe(prompt, video=video, strength=0.6).frames[0] -video_path = export_to_video(video_frames) -video_path -``` - -Here are some sample outputs: - - - - - -
- *Sample output videos omitted; prompt shown: "Darth vader surfing in waves."* -
- -## Tips - -Video generation is memory-intensive and one way to reduce your memory usage is to set `enable_forward_chunking` on the pipeline's UNet so you don't run the entire feedforward layer at once. Breaking it up into chunks in a loop is more efficient. - -Check out the [Text or image-to-video](../../using-diffusers/text-img2vid) guide for more details about how certain parameters can affect video generation and how to optimize inference by reducing memory usage. - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## TextToVideoSDPipeline -[[autodoc]] TextToVideoSDPipeline - - all - - __call__ - -## VideoToVideoSDPipeline -[[autodoc]] VideoToVideoSDPipeline - - all - - __call__ - -## TextToVideoSDPipelineOutput -[[autodoc]] pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput diff --git a/docs/source/en/api/pipelines/text_to_video_zero.md b/docs/source/en/api/pipelines/text_to_video_zero.md deleted file mode 100644 index 50e7620760f3..000000000000 --- a/docs/source/en/api/pipelines/text_to_video_zero.md +++ /dev/null @@ -1,306 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# Text2Video-Zero - -
- -[Text2Video-Zero: Text-to-Image Diffusion Models are Zero-Shot Video Generators](https://huggingface.co/papers/2303.13439) is by Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, [Zhangyang Wang](https://www.ece.utexas.edu/people/faculty/atlas-wang), Shant Navasardyan, [Humphrey Shi](https://www.humphreyshi.com). - -Text2Video-Zero enables zero-shot video generation using either: -1. A textual prompt -2. A prompt combined with guidance from poses or edges -3. Video Instruct-Pix2Pix (instruction-guided video editing) - -Results are temporally consistent and closely follow the guidance and textual prompts. - -![teaser-img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/t2v_zero_teaser.png) - -The abstract from the paper is: - -*Recent text-to-video generation approaches rely on computationally heavy training and require large-scale video datasets. In this paper, we introduce a new task of zero-shot text-to-video generation and propose a low-cost approach (without any training or optimization) by leveraging the power of existing text-to-image synthesis methods (e.g., Stable Diffusion), making them suitable for the video domain. -Our key modifications include (i) enriching the latent codes of the generated frames with motion dynamics to keep the global scene and the background time consistent; and (ii) reprogramming frame-level self-attention using a new cross-frame attention of each frame on the first frame, to preserve the context, appearance, and identity of the foreground object. -Experiments show that this leads to low overhead, yet high-quality and remarkably consistent video generation. Moreover, our approach is not limited to text-to-video synthesis but is also applicable to other tasks such as conditional and content-specialized video generation, and Video Instruct-Pix2Pix, i.e., instruction-guided video editing. -As experiments show, our method performs comparably or sometimes better than recent approaches, despite not being trained on additional video data.* - -You can find additional information about Text2Video-Zero on the [project page](https://text2video-zero.github.io/), [paper](https://huggingface.co/papers/2303.13439), and [original codebase](https://github.com/Picsart-AI-Research/Text2Video-Zero). - -## Usage example - -### Text-To-Video - -To generate a video from prompt, run the following Python code: -```python -import torch -from diffusers import TextToVideoZeroPipeline -import imageio - -model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" -pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") - -prompt = "A panda is playing guitar on times square" -result = pipe(prompt=prompt).images -result = [(r * 255).astype("uint8") for r in result] -imageio.mimsave("video.mp4", result, fps=4) -``` -You can change these parameters in the pipeline call: -* Motion field strength (see the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1): - * `motion_field_strength_x` and `motion_field_strength_y`. Default: `motion_field_strength_x=12`, `motion_field_strength_y=12` -* `T` and `T'` (see the [paper](https://huggingface.co/papers/2303.13439), Sect. 3.3.1) - * `t0` and `t1` in the range `{0, ..., num_inference_steps}`. Default: `t0=45`, `t1=48` -* Video length: - * `video_length`, the number of frames video_length to be generated. 
Default: `video_length=8` - -We can also generate longer videos by doing the processing in a chunk-by-chunk manner: -```python -import torch -from diffusers import TextToVideoZeroPipeline -import numpy as np - -model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" -pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") -seed = 0 -video_length = 24 #24 ÷ 4fps = 6 seconds -chunk_size = 8 -prompt = "A panda is playing guitar on times square" - -# Generate the video chunk-by-chunk -result = [] -chunk_ids = np.arange(0, video_length, chunk_size - 1) -generator = torch.Generator(device="cuda") -for i in range(len(chunk_ids)): - print(f"Processing chunk {i + 1} / {len(chunk_ids)}") - ch_start = chunk_ids[i] - ch_end = video_length if i == len(chunk_ids) - 1 else chunk_ids[i + 1] - # Attach the first frame for Cross Frame Attention - frame_ids = [0] + list(range(ch_start, ch_end)) - # Fix the seed for the temporal consistency - generator.manual_seed(seed) - output = pipe(prompt=prompt, video_length=len(frame_ids), generator=generator, frame_ids=frame_ids) - result.append(output.images[1:]) - -# Concatenate chunks and save -result = np.concatenate(result) -result = [(r * 255).astype("uint8") for r in result] -imageio.mimsave("video.mp4", result, fps=4) -``` - - -- #### SDXL Support -In order to use the SDXL model when generating a video from prompt, use the `TextToVideoZeroSDXLPipeline` pipeline: - -```python -import torch -from diffusers import TextToVideoZeroSDXLPipeline - -model_id = "stabilityai/stable-diffusion-xl-base-1.0" -pipe = TextToVideoZeroSDXLPipeline.from_pretrained( - model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True -).to("cuda") -``` - -### Text-To-Video with Pose Control -To generate a video from prompt with additional pose control - -1. Download a demo video - - ```python - from huggingface_hub import hf_hub_download - - filename = "__assets__/poses_skeleton_gifs/dance1_corr.mp4" - repo_id = "PAIR/Text2Video-Zero" - video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename) - ``` - - -2. Read video containing extracted pose images - ```python - from PIL import Image - import imageio - - reader = imageio.get_reader(video_path, "ffmpeg") - frame_count = 8 - pose_images = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)] - ``` - To extract pose from actual video, read [ControlNet documentation](controlnet). - -3. 
Run `StableDiffusionControlNetPipeline` with our custom attention processor - - ```python - import torch - from diffusers import StableDiffusionControlNetPipeline, ControlNetModel - from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor - - model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16) - pipe = StableDiffusionControlNetPipeline.from_pretrained( - model_id, controlnet=controlnet, torch_dtype=torch.float16 - ).to("cuda") - - # Set the attention processor - pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) - pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) - - # fix latents for all frames - latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1) - - prompt = "Darth Vader dancing in a desert" - result = pipe(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images - imageio.mimsave("video.mp4", result, fps=4) - ``` -- #### SDXL Support - - Since our attention processor also works with SDXL, it can be utilized to generate a video from prompt using ControlNet models powered by SDXL: - ```python - import torch - from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel - from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor - - controlnet_model_id = 'thibaud/controlnet-openpose-sdxl-1.0' - model_id = 'stabilityai/stable-diffusion-xl-base-1.0' - - controlnet = ControlNetModel.from_pretrained(controlnet_model_id, torch_dtype=torch.float16) - pipe = StableDiffusionControlNetPipeline.from_pretrained( - model_id, controlnet=controlnet, torch_dtype=torch.float16 - ).to('cuda') - - # Set the attention processor - pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) - pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) - - # fix latents for all frames - latents = torch.randn((1, 4, 128, 128), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1) - - prompt = "Darth Vader dancing in a desert" - result = pipe(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images - imageio.mimsave("video.mp4", result, fps=4) - ``` - -### Text-To-Video with Edge Control - -To generate a video from prompt with additional Canny edge control, follow the same steps described above for pose-guided generation using [Canny edge ControlNet model](https://huggingface.co/lllyasviel/sd-controlnet-canny). - - -### Video Instruct-Pix2Pix - -To perform text-guided video editing (with [InstructPix2Pix](pix2pix)): - -1. Download a demo video - - ```python - from huggingface_hub import hf_hub_download - - filename = "__assets__/pix2pix video/camel.mp4" - repo_id = "PAIR/Text2Video-Zero" - video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename) - ``` - -2. Read video from path - ```python - from PIL import Image - import imageio - - reader = imageio.get_reader(video_path, "ffmpeg") - frame_count = 8 - video = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)] - ``` - -3. 
Run `StableDiffusionInstructPix2PixPipeline` with our custom attention processor - ```python - import torch - from diffusers import StableDiffusionInstructPix2PixPipeline - from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor - - model_id = "timbrooks/instruct-pix2pix" - pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") - pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=3)) - - prompt = "make it Van Gogh Starry Night style" - result = pipe(prompt=[prompt] * len(video), image=video).images - imageio.mimsave("edited_video.mp4", result, fps=4) - ``` - - -### DreamBooth specialization - -Methods **Text-To-Video**, **Text-To-Video with Pose Control** and **Text-To-Video with Edge Control** -can run with custom [DreamBooth](../../training/dreambooth) models, as shown below for -[Canny edge ControlNet model](https://huggingface.co/lllyasviel/sd-controlnet-canny) and -[Avatar style DreamBooth](https://huggingface.co/PAIR/text2video-zero-controlnet-canny-avatar) model: - -1. Download a demo video - - ```python - from huggingface_hub import hf_hub_download - - filename = "__assets__/canny_videos_mp4/girl_turning.mp4" - repo_id = "PAIR/Text2Video-Zero" - video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename) - ``` - -2. Read video from path - ```python - from PIL import Image - import imageio - - reader = imageio.get_reader(video_path, "ffmpeg") - frame_count = 8 - canny_edges = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)] - ``` - -3. Run `StableDiffusionControlNetPipeline` with custom trained DreamBooth model - ```python - import torch - from diffusers import StableDiffusionControlNetPipeline, ControlNetModel - from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor - - # set model id to custom model - model_id = "PAIR/text2video-zero-controlnet-canny-avatar" - controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) - pipe = StableDiffusionControlNetPipeline.from_pretrained( - model_id, controlnet=controlnet, torch_dtype=torch.float16 - ).to("cuda") - - # Set the attention processor - pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) - pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) - - # fix latents for all frames - latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(canny_edges), 1, 1, 1) - - prompt = "oil painting of a beautiful girl avatar style" - result = pipe(prompt=[prompt] * len(canny_edges), image=canny_edges, latents=latents).images - imageio.mimsave("video.mp4", result, fps=4) - ``` - -You can filter out some available DreamBooth-trained models with [this link](https://huggingface.co/models?search=dreambooth). - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. 
- -## TextToVideoZeroPipeline -[[autodoc]] TextToVideoZeroPipeline - - all - - __call__ - -## TextToVideoZeroSDXLPipeline -[[autodoc]] TextToVideoZeroSDXLPipeline - - all - - __call__ - -## TextToVideoPipelineOutput -[[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput diff --git a/docs/source/en/api/pipelines/unclip.md b/docs/source/en/api/pipelines/unclip.md deleted file mode 100644 index 7c5c2b0d9ab9..000000000000 --- a/docs/source/en/api/pipelines/unclip.md +++ /dev/null @@ -1,37 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# unCLIP - -[Hierarchical Text-Conditional Image Generation with CLIP Latents](https://huggingface.co/papers/2204.06125) is by Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, Mark Chen. The unCLIP model in 🤗 Diffusers comes from kakaobrain's [karlo](https://github.com/kakaobrain/karlo). - -The abstract from the paper is following: - -*Contrastive models like CLIP have been shown to learn robust representations of images that capture both semantics and style. To leverage these representations for image generation, we propose a two-stage model: a prior that generates a CLIP image embedding given a text caption, and a decoder that generates an image conditioned on the image embedding. We show that explicitly generating image representations improves image diversity with minimal loss in photorealism and caption similarity. Our decoders conditioned on image representations can also produce variations of an image that preserve both its semantics and style, while varying the non-essential details absent from the image representation. Moreover, the joint embedding space of CLIP enables language-guided image manipulations in a zero-shot fashion. We use diffusion models for the decoder and experiment with both autoregressive and diffusion models for the prior, finding that the latter are computationally more efficient and produce higher-quality samples.* - -You can find lucidrains' DALL-E 2 recreation at [lucidrains/DALLE2-pytorch](https://github.com/lucidrains/DALLE2-pytorch). - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## UnCLIPPipeline -[[autodoc]] UnCLIPPipeline - - all - - __call__ - -## UnCLIPImageVariationPipeline -[[autodoc]] UnCLIPImageVariationPipeline - - all - - __call__ - -## ImagePipelineOutput -[[autodoc]] pipelines.ImagePipelineOutput diff --git a/docs/source/en/api/pipelines/unidiffuser.md b/docs/source/en/api/pipelines/unidiffuser.md deleted file mode 100644 index 2ff700e4b8be..000000000000 --- a/docs/source/en/api/pipelines/unidiffuser.md +++ /dev/null @@ -1,206 +0,0 @@ - - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -# UniDiffuser - -
- -The UniDiffuser model was proposed in [One Transformer Fits All Distributions in Multi-Modal Diffusion at Scale](https://huggingface.co/papers/2303.06555) by Fan Bao, Shen Nie, Kaiwen Xue, Chongxuan Li, Shi Pu, Yaole Wang, Gang Yue, Yue Cao, Hang Su, Jun Zhu. - -The abstract from the paper is: - -*This paper proposes a unified diffusion framework (dubbed UniDiffuser) to fit all distributions relevant to a set of multi-modal data in one model. Our key insight is -- learning diffusion models for marginal, conditional, and joint distributions can be unified as predicting the noise in the perturbed data, where the perturbation levels (i.e. timesteps) can be different for different modalities. Inspired by the unified view, UniDiffuser learns all distributions simultaneously with a minimal modification to the original diffusion model -- perturbs data in all modalities instead of a single modality, inputs individual timesteps in different modalities, and predicts the noise of all modalities instead of a single modality. UniDiffuser is parameterized by a transformer for diffusion models to handle input types of different modalities. Implemented on large-scale paired image-text data, UniDiffuser is able to perform image, text, text-to-image, image-to-text, and image-text pair generation by setting proper timesteps without additional overhead. In particular, UniDiffuser is able to produce perceptually realistic samples in all tasks and its quantitative results (e.g., the FID and CLIP score) are not only superior to existing general-purpose models but also comparable to the bespoken models (e.g., Stable Diffusion and DALL-E 2) in representative tasks (e.g., text-to-image generation).* - -You can find the original codebase at [thu-ml/unidiffuser](https://github.com/thu-ml/unidiffuser) and additional checkpoints at [thu-ml](https://huggingface.co/thu-ml). - -> [!WARNING] -> There is currently an issue on PyTorch 1.X where the output images are all black or the pixel values become `NaNs`. This issue can be mitigated by switching to PyTorch 2.X. - -This pipeline was contributed by [dg845](https://github.com/dg845). ❤️ - -## Usage Examples - -Because the UniDiffuser model is trained to model the joint distribution of (image, text) pairs, it is capable of performing a diverse range of generation tasks: - -### Unconditional Image and Text Generation - -Unconditional generation (where we start from only latents sampled from a standard Gaussian prior) from a [`UniDiffuserPipeline`] will produce a (image, text) pair: - -```python -import torch - -from diffusers import UniDiffuserPipeline - -device = "cuda" -model_id_or_path = "thu-ml/unidiffuser-v1" -pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) -pipe.to(device) - -# Unconditional image and text generation. The generation task is automatically inferred. -sample = pipe(num_inference_steps=20, guidance_scale=8.0) -image = sample.images[0] -text = sample.text[0] -image.save("unidiffuser_joint_sample_image.png") -print(text) -``` - -This is also called "joint" generation in the UniDiffuser paper, since we are sampling from the joint image-text distribution. - -Note that the generation task is inferred from the inputs used when calling the pipeline. -It is also possible to manually specify the unconditional generation task ("mode") manually with [`UniDiffuserPipeline.set_joint_mode`]: - -```python -# Equivalent to the above. 
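-# Setting the mode manually means later pipeline calls reuse it instead of inferring the task from the inputs; call reset_mode() to return to automatic inference.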
-pipe.set_joint_mode() -sample = pipe(num_inference_steps=20, guidance_scale=8.0) -``` - -When the mode is set manually, subsequent calls to the pipeline will use the set mode without attempting to infer the mode. -You can reset the mode with [`UniDiffuserPipeline.reset_mode`], after which the pipeline will once again infer the mode. - -You can also generate only an image or only text (which the UniDiffuser paper calls "marginal" generation since we sample from the marginal distribution of images and text, respectively): - -```python -# Unlike other generation tasks, image-only and text-only generation don't use classifier-free guidance -# Image-only generation -pipe.set_image_mode() -sample_image = pipe(num_inference_steps=20).images[0] -# Text-only generation -pipe.set_text_mode() -sample_text = pipe(num_inference_steps=20).text[0] -``` - -### Text-to-Image Generation - -UniDiffuser is also capable of sampling from conditional distributions; that is, the distribution of images conditioned on a text prompt or the distribution of texts conditioned on an image. -Here is an example of sampling from the conditional image distribution (text-to-image generation or text-conditioned image generation): - -```python -import torch - -from diffusers import UniDiffuserPipeline - -device = "cuda" -model_id_or_path = "thu-ml/unidiffuser-v1" -pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) -pipe.to(device) - -# Text-to-image generation -prompt = "an elephant under the sea" - -sample = pipe(prompt=prompt, num_inference_steps=20, guidance_scale=8.0) -t2i_image = sample.images[0] -t2i_image -``` - -The `text2img` mode requires that either an input `prompt` or `prompt_embeds` be supplied. You can set the `text2img` mode manually with [`UniDiffuserPipeline.set_text_to_image_mode`]. - -### Image-to-Text Generation - -Similarly, UniDiffuser can also produce text samples given an image (image-to-text or image-conditioned text generation): - -```python -import torch - -from diffusers import UniDiffuserPipeline -from diffusers.utils import load_image - -device = "cuda" -model_id_or_path = "thu-ml/unidiffuser-v1" -pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) -pipe.to(device) - -# Image-to-text generation -image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg" -init_image = load_image(image_url).resize((512, 512)) - -sample = pipe(image=init_image, num_inference_steps=20, guidance_scale=8.0) -i2t_text = sample.text[0] -print(i2t_text) -``` - -The `img2text` mode requires that an input `image` be supplied. You can set the `img2text` mode manually with [`UniDiffuserPipeline.set_image_to_text_mode`]. - -### Image Variation - -The UniDiffuser authors suggest performing image variation through a "round-trip" generation method, where given an input image, we first perform an image-to-text generation, and then perform a text-to-image generation on the outputs of the first generation. -This produces a new image which is semantically similar to the input image: - -```python -import torch - -from diffusers import UniDiffuserPipeline -from diffusers.utils import load_image - -device = "cuda" -model_id_or_path = "thu-ml/unidiffuser-v1" -pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) -pipe.to(device) - -# Image variation can be performed with an image-to-text generation followed by a text-to-image generation: -# 1. 
Image-to-text generation -image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg" -init_image = load_image(image_url).resize((512, 512)) - -sample = pipe(image=init_image, num_inference_steps=20, guidance_scale=8.0) -i2t_text = sample.text[0] -print(i2t_text) - -# 2. Text-to-image generation -sample = pipe(prompt=i2t_text, num_inference_steps=20, guidance_scale=8.0) -final_image = sample.images[0] -final_image.save("unidiffuser_image_variation_sample.png") -``` - -### Text Variation - -Similarly, text variation can be performed on an input prompt with a text-to-image generation followed by a image-to-text generation: - -```python -import torch - -from diffusers import UniDiffuserPipeline - -device = "cuda" -model_id_or_path = "thu-ml/unidiffuser-v1" -pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16) -pipe.to(device) - -# Text variation can be performed with a text-to-image generation followed by a image-to-text generation: -# 1. Text-to-image generation -prompt = "an elephant under the sea" - -sample = pipe(prompt=prompt, num_inference_steps=20, guidance_scale=8.0) -t2i_image = sample.images[0] -t2i_image.save("unidiffuser_text2img_sample_image.png") - -# 2. Image-to-text generation -sample = pipe(image=t2i_image, num_inference_steps=20, guidance_scale=8.0) -final_prompt = sample.text[0] -print(final_prompt) -``` - -> [!TIP] -> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. - -## UniDiffuserPipeline -[[autodoc]] UniDiffuserPipeline - - all - - __call__ - -## ImageTextPipelineOutput -[[autodoc]] pipelines.ImageTextPipelineOutput diff --git a/docs/source/en/api/pipelines/wuerstchen.md b/docs/source/en/api/pipelines/wuerstchen.md deleted file mode 100644 index 2be3631d8456..000000000000 --- a/docs/source/en/api/pipelines/wuerstchen.md +++ /dev/null @@ -1,170 +0,0 @@ - - -# Würstchen - -> [!WARNING] -> This pipeline is deprecated but it can still be used. However, we won't test the pipeline anymore and won't accept any changes to it. If you run into any issues, reinstall the last Diffusers version that supported this model. - -
- - - -[Wuerstchen: An Efficient Architecture for Large-Scale Text-to-Image Diffusion Models](https://huggingface.co/papers/2306.00637) is by Pablo Pernias, Dominic Rampas, Mats L. Richter and Christopher Pal and Marc Aubreville. - -The abstract from the paper is: - -*We introduce Würstchen, a novel architecture for text-to-image synthesis that combines competitive performance with unprecedented cost-effectiveness for large-scale text-to-image diffusion models. A key contribution of our work is to develop a latent diffusion technique in which we learn a detailed but extremely compact semantic image representation used to guide the diffusion process. This highly compressed representation of an image provides much more detailed guidance compared to latent representations of language and this significantly reduces the computational requirements to achieve state-of-the-art results. Our approach also improves the quality of text-conditioned image generation based on our user preference study. The training requirements of our approach consists of 24,602 A100-GPU hours - compared to Stable Diffusion 2.1's 200,000 GPU hours. Our approach also requires less training data to achieve these results. Furthermore, our compact latent representations allows us to perform inference over twice as fast, slashing the usual costs and carbon footprint of a state-of-the-art (SOTA) diffusion model significantly, without compromising the end performance. In a broader comparison against SOTA models our approach is substantially more efficient and compares favorably in terms of image quality. We believe that this work motivates more emphasis on the prioritization of both performance and computational accessibility.* - -## Würstchen Overview -Würstchen is a diffusion model, whose text-conditional model works in a highly compressed latent space of images. Why is this important? Compressing data can reduce computational costs for both training and inference by magnitudes. Training on 1024x1024 images is way more expensive than training on 32x32. Usually, other works make use of a relatively small compression, in the range of 4x - 8x spatial compression. Würstchen takes this to an extreme. Through its novel design, we achieve a 42x spatial compression. This was unseen before because common methods fail to faithfully reconstruct detailed images after 16x spatial compression. Würstchen employs a two-stage compression, what we call Stage A and Stage B. Stage A is a VQGAN, and Stage B is a Diffusion Autoencoder (more details can be found in the [paper](https://huggingface.co/papers/2306.00637)). A third model, Stage C, is learned in that highly compressed latent space. This training requires fractions of the compute used for current top-performing models, while also allowing cheaper and faster inference. - -## Würstchen v2 comes to Diffusers - -After the initial paper release, we have improved numerous things in the architecture, training and sampling, making Würstchen competitive to current state-of-the-art models in many ways. We are excited to release this new version together with Diffusers. Here is a list of the improvements. - -- Higher resolution (1024x1024 up to 2048x2048) -- Faster inference -- Multi Aspect Resolution Sampling -- Better quality - - -We are releasing 3 checkpoints for the text-conditional image generation model (Stage C). 
Those are: - -- v2-base -- v2-aesthetic -- **(default)** v2-interpolated (50% interpolation between v2-base and v2-aesthetic) - -We recommend using v2-interpolated, as it has a nice touch of both photorealism and aesthetics. Use v2-base for finetunings as it does not have a style bias and use v2-aesthetic for very artistic generations. -A comparison can be seen here: - - - -## Text-to-Image Generation - -For the sake of usability, Würstchen can be used with a single pipeline. This pipeline can be used as follows: - -```python -import torch -from diffusers import AutoPipelineForText2Image -from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS - -pipe = AutoPipelineForText2Image.from_pretrained("warp-ai/wuerstchen", torch_dtype=torch.float16).to("cuda") - -caption = "Anthropomorphic cat dressed as a fire fighter" -images = pipe( - caption, - width=1024, - height=1536, - prior_timesteps=DEFAULT_STAGE_C_TIMESTEPS, - prior_guidance_scale=4.0, - num_images_per_prompt=2, -).images -``` - -For explanation purposes, we can also initialize the two main pipelines of Würstchen individually. Würstchen consists of 3 stages: Stage C, Stage B, Stage A. They all have different jobs and work only together. When generating text-conditional images, Stage C will first generate the latents in a very compressed latent space. This is what happens in the `prior_pipeline`. Afterwards, the generated latents will be passed to Stage B, which decompresses the latents into a bigger latent space of a VQGAN. These latents can then be decoded by Stage A, which is a VQGAN, into the pixel-space. Stage B & Stage A are both encapsulated in the `decoder_pipeline`. For more details, take a look at the [paper](https://huggingface.co/papers/2306.00637). - -```python -import torch -from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline -from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS - -device = "cuda" -dtype = torch.float16 -num_images_per_prompt = 2 - -prior_pipeline = WuerstchenPriorPipeline.from_pretrained( - "warp-ai/wuerstchen-prior", torch_dtype=dtype -).to(device) -decoder_pipeline = WuerstchenDecoderPipeline.from_pretrained( - "warp-ai/wuerstchen", torch_dtype=dtype -).to(device) - -caption = "Anthropomorphic cat dressed as a fire fighter" -negative_prompt = "" - -prior_output = prior_pipeline( - prompt=caption, - height=1024, - width=1536, - timesteps=DEFAULT_STAGE_C_TIMESTEPS, - negative_prompt=negative_prompt, - guidance_scale=4.0, - num_images_per_prompt=num_images_per_prompt, -) -decoder_output = decoder_pipeline( - image_embeddings=prior_output.image_embeddings, - prompt=caption, - negative_prompt=negative_prompt, - guidance_scale=0.0, - output_type="pil", -).images[0] -decoder_output -``` - -## Speed-Up Inference -You can make use of `torch.compile` function and gain a speed-up of about 2-3x: - -```python -prior_pipeline.prior = torch.compile(prior_pipeline.prior, mode="reduce-overhead", fullgraph=True) -decoder_pipeline.decoder = torch.compile(decoder_pipeline.decoder, mode="reduce-overhead", fullgraph=True) -``` - -## Limitations - -- Due to the high compression employed by Würstchen, generations can lack a good amount -of detail. To our human eye, this is especially noticeable in faces, hands etc. -- **Images can only be generated in 128-pixel steps**, e.g. 
the next higher resolution -after 1024x1024 is 1152x1152 -- The model lacks the ability to render correct text in images -- The model often does not achieve photorealism -- Difficult compositional prompts are hard for the model - -The original codebase, as well as experimental ideas, can be found at [dome272/Wuerstchen](https://github.com/dome272/Wuerstchen). - - -## WuerstchenCombinedPipeline - -[[autodoc]] WuerstchenCombinedPipeline - - all - - __call__ - -## WuerstchenPriorPipeline - -[[autodoc]] WuerstchenPriorPipeline - - all - - __call__ - -## WuerstchenPriorPipelineOutput - -[[autodoc]] pipelines.wuerstchen.pipeline_wuerstchen_prior.WuerstchenPriorPipelineOutput - -## WuerstchenDecoderPipeline - -[[autodoc]] WuerstchenDecoderPipeline - - all - - __call__ - -## Citation - -```bibtex - @misc{pernias2023wuerstchen, - title={Wuerstchen: An Efficient Architecture for Large-Scale Text-to-Image Diffusion Models}, - author={Pablo Pernias and Dominic Rampas and Mats L. Richter and Christopher J. Pal and Marc Aubreville}, - year={2023}, - eprint={2306.00637}, - archivePrefix={arXiv}, - primaryClass={cs.CV} - } -``` diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py index 1fc0914fe09e..442bff4c06e7 100644 --- a/src/diffusers/__init__.py +++ b/src/diffusers/__init__.py @@ -177,6 +177,14 @@ "apply_taylorseer_cache", ] ) + _import_structure["image_processor"] = [ + "IPAdapterMaskProcessor", + "InpaintProcessor", + "PixArtImageProcessor", + "VaeImageProcessor", + "VaeImageProcessorLDM3D", + ] + _import_structure["video_processor"] = ["VideoProcessor"] _import_structure["models"].extend( [ "AllegroTransformer3DModel", @@ -945,6 +953,13 @@ apply_pyramid_attention_broadcast, apply_taylorseer_cache, ) + from .image_processor import ( + InpaintProcessor, + IPAdapterMaskProcessor, + PixArtImageProcessor, + VaeImageProcessor, + VaeImageProcessorLDM3D, + ) from .models import ( AllegroTransformer3DModel, AsymmetricAutoencoderKL, @@ -1141,6 +1156,7 @@ VQDiffusionScheduler, ) from .training_utils import EMAModel + from .video_processor import VideoProcessor try: if not (is_torch_available() and is_scipy_available()): From cb4d6766985d55a2b7fcdcbf9c6e8bd603feb7bb Mon Sep 17 00:00:00 2001 From: stevhliu Date: Wed, 18 Feb 2026 11:18:47 -0800 Subject: [PATCH 7/8] update links --- docs/source/en/api/pipelines/overview.md | 20 ------------------- docs/source/en/training/wuerstchen.md | 5 ----- .../using-diffusers/controlling_generation.md | 10 +++++----- 3 files changed, 5 insertions(+), 30 deletions(-) diff --git a/docs/source/en/api/pipelines/overview.md b/docs/source/en/api/pipelines/overview.md index 22fcf560eaca..c034c0a84fcc 100644 --- a/docs/source/en/api/pipelines/overview.md +++ b/docs/source/en/api/pipelines/overview.md @@ -27,13 +27,9 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an | Pipeline | Tasks | |---|---| -| [aMUSEd](amused) | text2image | | [AnimateDiff](animatediff) | text2video | -| [Attend-and-Excite](attend_and_excite) | text2image | -| [AudioLDM](audioldm) | text2audio | | [AudioLDM2](audioldm2) | text2audio | | [AuraFlow](aura_flow) | text2image | -| [BLIP Diffusion](blip_diffusion) | text2image | | [Bria 3.2](bria_3_2) | text2image | | [CogVideoX](cogvideox) | text2video | | [Consistency Models](consistency_models) | unconditional image generation | @@ -42,17 +38,12 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an | [ControlNet with Hunyuan-DiT](controlnet_hunyuandit) | text2image | | [ControlNet with Stable 
Diffusion 3](controlnet_sd3) | text2image | | [ControlNet with Stable Diffusion XL](controlnet_sdxl) | text2image | -| [ControlNet-XS](controlnetxs) | text2image | -| [ControlNet-XS with Stable Diffusion XL](controlnetxs_sdxl) | text2image | -| [Dance Diffusion](dance_diffusion) | unconditional audio generation | | [DDIM](ddim) | unconditional image generation | | [DDPM](ddpm) | unconditional image generation | | [DeepFloyd IF](deepfloyd_if) | text2image, image2image, inpainting, super-resolution | -| [DiffEdit](diffedit) | inpainting | | [DiT](dit) | text2image | | [Flux](flux) | text2image | | [Hunyuan-DiT](hunyuandit) | text2image | -| [I2VGen-XL](i2vgenxl) | image2video | | [InstructPix2Pix](pix2pix) | image editing | | [Kandinsky 2.1](kandinsky) | text2image, image2image, inpainting, interpolation | | [Kandinsky 2.2](kandinsky_v22) | text2image, image2image, inpainting | @@ -64,15 +55,9 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an | [LEDITS++](ledits_pp) | image editing | | [Lumina-T2X](lumina) | text2image | | [Marigold](marigold) | depth-estimation, normals-estimation, intrinsic-decomposition | -| [MultiDiffusion](panorama) | text2image | -| [MusicLDM](musicldm) | text2audio | | [PAG](pag) | text2image | -| [Paint by Example](paint_by_example) | inpainting | -| [PIA](pia) | image2video | | [PixArt-α](pixart) | text2image | | [PixArt-Σ](pixart_sigma) | text2image | -| [Self-Attention Guidance](self_attention_guidance) | text2image | -| [Semantic Guidance](semantic_stable_diffusion) | text2image | | [Shap-E](shap_e) | text-to-3D, image-to-3D | | [Stable Audio](stable_audio) | text2audio | | [Stable Cascade](stable_cascade) | text2image | @@ -81,12 +66,7 @@ The table below lists all the pipelines currently available in 🤗 Diffusers an | [Stable Diffusion XL Turbo](stable_diffusion/sdxl_turbo) | text2image, image2image, inpainting | | [Stable unCLIP](stable_unclip) | text2image, image variation | | [T2I-Adapter](stable_diffusion/adapter) | text2image | -| [Text2Video](text_to_video) | text2video, video2video | -| [Text2Video-Zero](text_to_video_zero) | text2video | -| [unCLIP](unclip) | text2image, image variation | -| [UniDiffuser](unidiffuser) | text2image, image2text, image variation, text variation, unconditional image generation, unconditional audio generation | | [Value-guided planning](value_guided_sampling) | value guided sampling | -| [Wuerstchen](wuerstchen) | text2image | | [VisualCloze](visualcloze) | text2image, image2image, subject driven generation, inpainting, style transfer, image restoration, image editing, [depth,normal,edge,pose]2image, [depth,normal,edge,pose]-estimation, virtual try-on, image relighting | ## DiffusionPipeline diff --git a/docs/source/en/training/wuerstchen.md b/docs/source/en/training/wuerstchen.md index 1c362879a6f4..c8418df1989e 100644 --- a/docs/source/en/training/wuerstchen.md +++ b/docs/source/en/training/wuerstchen.md @@ -173,8 +173,3 @@ images = pipeline( ).images ``` -## Next steps - -Congratulations on training a Wuerstchen model! To learn more about how to use your new model, the following may be helpful: - -- Take a look at the [Wuerstchen](../api/pipelines/wuerstchen#text-to-image-generation) API documentation to learn more about how to use the pipeline for text-to-image generation and its limitations. 
diff --git a/docs/source/en/using-diffusers/controlling_generation.md b/docs/source/en/using-diffusers/controlling_generation.md index b7b0ea491949..f69e54730a2e 100644 --- a/docs/source/en/using-diffusers/controlling_generation.md +++ b/docs/source/en/using-diffusers/controlling_generation.md @@ -74,7 +74,7 @@ InstructPix2Pix has been explicitly trained to work well with [InstructGPT](http [Paper](https://huggingface.co/papers/2301.13826) -[Attend and Excite](../api/pipelines/attend_and_excite) allows subjects in the prompt to be faithfully represented in the final image. +Attend and Excite allows subjects in the prompt to be faithfully represented in the final image. A set of token indices are given as input, corresponding to the subjects in the prompt that need to be present in the image. During denoising, each token index is guaranteed to have a minimum attention threshold for at least one patch of the image. The intermediate latents are iteratively optimized during the denoising process to strengthen the attention of the most neglected subject token until the attention threshold is passed for all subject tokens. @@ -84,7 +84,7 @@ Like Pix2Pix Zero, Attend and Excite also involves a mini optimization loop (lea [Paper](https://huggingface.co/papers/2301.12247) -[SEGA](../api/pipelines/semantic_stable_diffusion) allows applying or removing one or more concepts from an image. The strength of the concept can also be controlled. I.e. the smile concept can be used to incrementally increase or decrease the smile of a portrait. +SEGA allows applying or removing one or more concepts from an image. The strength of the concept can also be controlled. I.e. the smile concept can be used to incrementally increase or decrease the smile of a portrait. Similar to how classifier free guidance provides guidance via empty prompt inputs, SEGA provides guidance on conceptual prompts. Multiple of these conceptual prompts can be applied simultaneously. Each conceptual prompt can either add or remove their concept depending on if the guidance is applied positively or negatively. @@ -94,7 +94,7 @@ Unlike Pix2Pix Zero or Attend and Excite, SEGA directly interacts with the diffu [Paper](https://huggingface.co/papers/2210.00939) -[Self-attention Guidance](../api/pipelines/self_attention_guidance) improves the general quality of images. +Self-attention Guidance improves the general quality of images. SAG provides guidance from predictions not conditioned on high-frequency details to fully conditioned images. The high frequency details are extracted out of the UNet self-attention maps. @@ -110,7 +110,7 @@ It conditions on a monocular depth estimate of the original image. [Paper](https://huggingface.co/papers/2302.08113) -[MultiDiffusion Panorama](../api/pipelines/panorama) defines a new generation process over a pre-trained diffusion model. This process binds together multiple diffusion generation methods that can be readily applied to generate high quality and diverse images. Results adhere to user-provided controls, such as desired aspect ratio (e.g., panorama), and spatial guiding signals, ranging from tight segmentation masks to bounding boxes. +MultiDiffusion Panorama defines a new generation process over a pre-trained diffusion model. This process binds together multiple diffusion generation methods that can be readily applied to generate high quality and diverse images. 
Results adhere to user-provided controls, such as desired aspect ratio (e.g., panorama), and spatial guiding signals, ranging from tight segmentation masks to bounding boxes. MultiDiffusion Panorama allows to generate high-quality images at arbitrary aspect ratios (e.g., panoramas). ## Fine-tuning your own models @@ -156,7 +156,7 @@ concept(s) of interest. [Paper](https://huggingface.co/papers/2210.11427) -[DiffEdit](../api/pipelines/diffedit) allows for semantic editing of input images along with +DiffEdit allows for semantic editing of input images along with input prompts while preserving the original input images as much as possible. ## T2I-Adapter From c911d1fd458baf3a23e76b98865ee9c3bcb251a2 Mon Sep 17 00:00:00 2001 From: stevhliu Date: Wed, 18 Feb 2026 11:19:31 -0800 Subject: [PATCH 8/8] fix --- src/diffusers/utils/dummy_pt_objects.py | 90 +++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/src/diffusers/utils/dummy_pt_objects.py b/src/diffusers/utils/dummy_pt_objects.py index 4e402921aa5f..e6993c6b609b 100644 --- a/src/diffusers/utils/dummy_pt_objects.py +++ b/src/diffusers/utils/dummy_pt_objects.py @@ -311,6 +311,81 @@ def apply_taylorseer_cache(*args, **kwargs): requires_backends(apply_taylorseer_cache, ["torch"]) +class InpaintProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class IPAdapterMaskProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class PixArtImageProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class VaeImageProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + +class VaeImageProcessorLDM3D(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + class AllegroTransformer3DModel(metaclass=DummyObject): _backends = ["torch"] @@ -3011,3 +3086,18 @@ def from_config(cls, *args, **kwargs): @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) + + +class VideoProcessor(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + 
requires_backends(cls, ["torch"])
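
The classes above follow the standard dummy-object pattern: instantiating any of them calls `requires_backends`, which raises an informative `ImportError` when `torch` is not installed. A minimal sketch of that behavior, assuming an environment without `torch` (the exact error text depends on the installed Diffusers version):

```python
# Assumes torch is NOT installed, so the dummy placeholders are what get exported.
from diffusers.utils import dummy_pt_objects

try:
    dummy_pt_objects.VideoProcessor()  # placeholder for the real VideoProcessor added in this patch
except ImportError as err:
    # requires_backends raises with a message pointing to the missing `torch` backend
    print(err)
```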