Commit 7391723 · Parent(s): c1f6c62
Improve mem usage (#70)
- Improve mem usage (2b0be71ae4a84219bf76feddb29e3a339d2ad2f9)
- improve (dcab47337e68e8397244156ea1f8216685eef465)
- improve (f7bc5db379dbda268146e36d32388273bdd603ca)
- improve (cf27adf590f95fe663b87c0e7df23f535a3d9b68)
- improve (fb9b203ded8b0e9018fc1566ea3dca01e4273b31)
- improve (69cc446845536f34f18ba354dd8515b3ce28b8a9)
- improve (70b9430b16cb8d20a76feb590f8920dcdf2ff938)
- make style (679bb8131dff16a0d5d957af456a886ea1ec1e18)
Co-authored-by: Patrick von Platen <[email protected]>
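The main saving, as the app.py diff below shows, is that image_pipe is no longer materialized as a second pipeline: it is rebuilt from main_pipe.components, so the UNet, VAE, text encoder and ControlNet weights are loaded onto the GPU once and shared by both pipelines. A minimal sketch of that pattern, assuming diffusers and a CUDA device; the checkpoint IDs here are illustrative placeholders, not necessarily the ones this Space loads:

import torch
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    StableDiffusionControlNetImg2ImgPipeline,
)

# Illustrative checkpoints, not the Space's actual model IDs.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
)
main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")

# Reuse the modules main_pipe already holds instead of calling
# from_pretrained() a second time; no second copy of the weights is allocated.
image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)

Because the two pipelines share the same modules, anything done to main_pipe (moving it to CUDA, enabling memory-efficient attention) also applies to the shared modules that image_pipe uses.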
Files changed:
- app.py +7 -5
- requirements.txt +4 -2
app.py CHANGED

@@ -1,5 +1,4 @@
 import torch
-import os
 import gradio as gr
 from PIL import Image
 import random
@@ -31,11 +30,13 @@ main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
     safety_checker=None,
     torch_dtype=torch.float16,
 ).to("cuda")
+
 #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
 #main_pipe.unet.to(memory_format=torch.channels_last)
 #main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
 #model_id = "stabilityai/sd-x2-latent-upscaler"
-image_pipe = StableDiffusionControlNetImg2ImgPipeline
+image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)
+
 #image_pipe.unet = torch.compile(image_pipe.unet, mode="reduce-overhead", fullgraph=True)
 #upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
 #upscaler.to("cuda")
@@ -110,9 +111,11 @@ def inference(
 
     # Rest of your existing code
     control_image_small = center_crop_resize(control_image)
+    control_image_large = center_crop_resize(control_image, (1024, 1024))
+
     main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
     my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
-    generator = torch.manual_seed(my_seed)
+    generator = torch.Generator(device="cuda").manual_seed(my_seed)
 
     out = main_pipe(
         prompt=prompt,
@@ -126,7 +129,6 @@ def inference(
         num_inference_steps=15,
         output_type="latent"
     )
-    control_image_large = center_crop_resize(control_image, (1024, 1024))
     upscaled_latents = upscale(out, "nearest-exact", 2)
     out_image = image_pipe(
         prompt=prompt,
@@ -201,4 +203,4 @@ with gr.Blocks(css=css) as app:
 app.queue(max_size=20)
 
 if __name__ == "__main__":
-    app.launch()
+    app.launch()
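The other behavioral change in app.py is the seeding: torch.manual_seed(my_seed) seeds the global default CPU generator (and returns it), while the new code creates a generator bound to the CUDA device, so the initial latents can be sampled directly on the GPU and the global RNG state is left alone. A minimal sketch of the difference, assuming a CUDA device is available (my_seed stands in for the value picked in inference()):

import torch

my_seed = 1234  # stand-in for the Space's seed handling

# Before: seeds and returns the default CPU generator.
cpu_generator = torch.manual_seed(my_seed)

# After: a dedicated generator living on the GPU; pass it to the pipeline call
# via generator=... so seeding stays local to this request.
cuda_generator = torch.Generator(device="cuda").manual_seed(my_seed)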
    	
requirements.txt CHANGED

@@ -1,9 +1,11 @@
 diffusers
 transformers
 accelerate
-torch
 xformers
 gradio
 Pillow
 qrcode
-filelock
+filelock
+
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch
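On the requirements side, torch now sits behind --extra-index-url https://download.pytorch.org/whl/cu118, an option pip also honors when it appears inside a requirements file: pip install -r requirements.txt (effectively what the Space runs at build time) then resolves torch against PyTorch's CUDA 11.8 wheel index in addition to PyPI, rather than taking whichever default build PyPI serves.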