Upload folder using huggingface_hub

- .gitattributes +1 -1
- README.md +142 -0
- config.json +42 -0
- diffusion_pytorch_model.bin +3 -0
- images/original.png +0 -0
- images/output.png +3 -0
- sd.png +0 -0
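
The commit title indicates these files were pushed with `huggingface_hub`'s folder upload rather than a manual git push. A minimal sketch of such a call (the local folder path and token handling are illustrative assumptions; the repo id is shown as this repository's):

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up a token from `huggingface-cli login` if present
api.upload_folder(
    folder_path="./control_v11f1e_sd15_tile",   # hypothetical local folder
    repo_id="lllyasviel/control_v11f1e_sd15_tile",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```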
    	
.gitattributes CHANGED

@@ -25,7 +25,6 @@
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
@@ -33,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+images/output.png filter=lfs diff=lfs merge=lfs -text
    	
README.md ADDED

---
license: openrail
base_model: runwayml/stable-diffusion-v1-5
tags:
- art
- controlnet
- stable-diffusion
- controlnet-v1-1
- image-to-image
duplicated_from: ControlNet-1-1-preview/control_v11f1e_sd15_tile
---

# Controlnet - v1.1 - *Tile Version*

**Controlnet v1.1** was released in [lllyasviel/ControlNet-v1-1](https://huggingface.co/lllyasviel/ControlNet-v1-1) by [Lvmin Zhang](https://huggingface.co/lllyasviel).

This checkpoint is a conversion of [the original checkpoint](https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11f1e_sd15_tile.pth) into `diffusers` format.
It can be used in combination with **Stable Diffusion**, such as [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5).

For more details, please also have a look at the [🧨 Diffusers docs](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/controlnet).

ControlNet is a neural network structure to control diffusion models by adding extra conditions.

![img](./sd.png)

This checkpoint corresponds to the ControlNet conditioned on **tiled images**. Conceptually, it is similar to a super-resolution model, but its usage is not limited to that. It is also possible to generate details at the same size as the input (condition) image.

**This model was contributed by [*takuma104*](https://huggingface.co/takuma104).**

## Model Details
- **Developed by:** Lvmin Zhang, Maneesh Agrawala
- **Model type:** Diffusion-based text-to-image generation model
- **Language(s):** English
- **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying out in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based.
- **Resources for more information:** [GitHub Repository](https://github.com/lllyasviel/ControlNet), [Paper](https://arxiv.org/abs/2302.05543).
- **Cite as:**

  @misc{zhang2023adding,
    title={Adding Conditional Control to Text-to-Image Diffusion Models},
    author={Lvmin Zhang and Maneesh Agrawala},
    year={2023},
    eprint={2302.05543},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
  }

## Introduction

ControlNet was proposed in [*Adding Conditional Control to Text-to-Image Diffusion Models*](https://arxiv.org/abs/2302.05543) by
Lvmin Zhang, Maneesh Agrawala.

The abstract reads as follows:

*We present a neural network structure, ControlNet, to control pretrained large diffusion models to support additional input conditions.
The ControlNet learns task-specific conditions in an end-to-end way, and the learning is robust even when the training dataset is small (< 50k).
Moreover, training a ControlNet is as fast as fine-tuning a diffusion model, and the model can be trained on a personal device.
Alternatively, if powerful computation clusters are available, the model can scale to large amounts (millions to billions) of data.
We report that large diffusion models like Stable Diffusion can be augmented with ControlNets to enable conditional inputs like edge maps, segmentation maps, keypoints, etc.
This may enrich the methods to control large diffusion models and further facilitate related applications.*

## Example

It is recommended to use the checkpoint with [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5), as the checkpoint has been trained on it.
Experimentally, the checkpoint can also be used with other diffusion models, such as dreamboothed Stable Diffusion.

1. Let's install `diffusers` and related packages:
```
$ pip install diffusers transformers accelerate
```
2. Run code:
```python
import torch
from PIL import Image
from diffusers import ControlNetModel, DiffusionPipeline
from diffusers.utils import load_image

def resize_for_condition_image(input_image: Image.Image, resolution: int):
    # Scale the short side up to `resolution`, then snap both sides to
    # multiples of 64 so the latent dimensions stay valid for the UNet.
    input_image = input_image.convert("RGB")
    W, H = input_image.size
    k = float(resolution) / min(H, W)
    H *= k
    W *= k
    H = int(round(H / 64.0)) * 64
    W = int(round(W / 64.0)) * 64
    img = input_image.resize((W, H), resample=Image.LANCZOS)
    return img

controlnet = ControlNetModel.from_pretrained('lllyasviel/control_v11f1e_sd15_tile',
                                             torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
                                         custom_pipeline="stable_diffusion_controlnet_img2img",
                                         controlnet=controlnet,
                                         torch_dtype=torch.float16).to('cuda')
# Requires the `xformers` package; remove this line if it is not installed.
pipe.enable_xformers_memory_efficient_attention()

source_image = load_image('https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/main/images/original.png')

# The same upscaled image serves as both the img2img input and the
# ControlNet tile condition.
condition_image = resize_for_condition_image(source_image, 1024)
image = pipe(prompt="best quality",
             negative_prompt="blur, lowres, bad anatomy, bad hands, cropped, worst quality",
             image=condition_image,
             controlnet_conditioning_image=condition_image,
             width=condition_image.size[0],
             height=condition_image.size[1],
             strength=1.0,
             generator=torch.manual_seed(0),
             num_inference_steps=32,
            ).images[0]

image.save('output.png')
```
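
For intuition on `resize_for_condition_image`: with `resolution=1024`, a hypothetical 768×512 input gets scale factor k = 1024/512 = 2, so both sides land on 1536×1024, which are already multiples of 64:

```python
from PIL import Image

# Reuses resize_for_condition_image from the snippet above;
# the 768x512 source size is purely illustrative.
img = Image.new("RGB", (768, 512))
print(resize_for_condition_image(img, 1024).size)  # (1536, 1024)
```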

![original](./images/original.png)
![output](./images/output.png)

## Other released checkpoints v1-1

The authors released 14 different checkpoints, each trained with [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)
on a different type of conditioning:

| Model Name | Control Image Overview| Condition Image | Control Image Example | Generated Image Example |
|---|---|---|---|---|
|[lllyasviel/control_v11p_sd15_canny](https://huggingface.co/lllyasviel/control_v11p_sd15_canny)<br/> | *Trained with canny edge detection* | A monochrome image with white edges on a black background.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11e_sd15_ip2p](https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p)<br/> | *Trained with pixel to pixel instruction* | No condition.|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11e_sd15_ip2p/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint)<br/> | *Trained with image inpainting* | No condition.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/output.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint/resolve/main/images/output.png"/></a>|
|[lllyasviel/control_v11p_sd15_mlsd](https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd)<br/> | *Trained with multi-level line segment detection* | An image with annotated line segments.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_mlsd/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11f1p_sd15_depth](https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth)<br/> | *Trained with depth estimation* | An image with depth information, usually represented as a grayscale image.|<a href="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11p_sd15_normalbae](https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae)<br/> | *Trained with surface normal estimation* | An image with surface normal information, usually represented as a color-coded image.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11p_sd15_seg](https://huggingface.co/lllyasviel/control_v11p_sd15_seg)<br/> | *Trained with image segmentation* | An image with segmented regions, usually represented as a color-coded image.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_seg/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11p_sd15_lineart](https://huggingface.co/lllyasviel/control_v11p_sd15_lineart)<br/> | *Trained with line art generation* | An image with line art, usually black lines on a white background.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_lineart/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11p_sd15s2_lineart_anime](https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime)<br/> | *Trained with anime line art generation* | An image with anime-style line art.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15s2_lineart_anime/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11p_sd15_openpose](https://huggingface.co/lllyasviel/control_v11p_sd15_openpose)<br/> | *Trained with human pose estimation* | An image with human poses, usually represented as a set of keypoints or skeletons.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_openpose/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11p_sd15_scribble](https://huggingface.co/lllyasviel/control_v11p_sd15_scribble)<br/> | *Trained with scribble-based image generation* | An image with scribbles, usually random or user-drawn strokes.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_scribble/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11p_sd15_softedge](https://huggingface.co/lllyasviel/control_v11p_sd15_softedge)<br/> | *Trained with soft edge image generation* | An image with soft edges, usually to create a more painterly or artistic effect.|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11p_sd15_softedge/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11e_sd15_shuffle](https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle)<br/> | *Trained with image shuffling* | An image with shuffled patches or regions.|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/image_out.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/image_out.png"/></a>|
|[lllyasviel/control_v11f1e_sd15_tile](https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile)<br/> | *Trained with image tiling* | A blurry image or part of an image.|<a href="https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/main/images/original.png"><img width="64" style="margin:0;padding:0;" src="https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/main/images/original.png"/></a>|<a href="https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/main/images/output.png"><img width="64" src="https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/main/images/output.png"/></a>|
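
Each checkpoint in the table loads the same way as the tile model above. A hedged sketch for swapping in the canny variant with the stock `StableDiffusionControlNetPipeline` (rather than the img2img community pipeline used in the example; the prompt is a placeholder, and the control image URL is the one cited in the table):

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16).to("cuda")

# Canny edge map from the canny checkpoint's own model card.
control = load_image(
    "https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/main/images/control.png")
image = pipe("best quality", image=control, num_inference_steps=20).images[0]
```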

## More information

For more information, please also have a look at the [Diffusers ControlNet Blog Post](https://huggingface.co/blog/controlnet) and the [official docs](https://github.com/lllyasviel/ControlNet-v1-1-nightly).
    	
config.json ADDED

{
  "_class_name": "ControlNetModel",
  "_diffusers_version": "0.17.0.dev0",
  "act_fn": "silu",
  "attention_head_dim": 8,
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "class_embed_type": null,
  "conditioning_embedding_out_channels": [
    16,
    32,
    96,
    256
  ],
  "controlnet_conditioning_channel_order": "rgb",
  "cross_attention_dim": 768,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "global_pool_conditions": false,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_scale_factor": 1,
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "projection_class_embeddings_input_dim": null,
  "resnet_time_scale_shift": "default",
  "upcast_attention": false,
  "use_linear_projection": false
}
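
This config is what `ControlNetModel.from_pretrained` consumes when loading the repo. A small sanity-check sketch reading the values back (assumes network access and roughly a 1.4 GB weight download):

```python
from diffusers import ControlNetModel

# Loads config.json and diffusion_pytorch_model.bin from this repo.
cn = ControlNetModel.from_pretrained("lllyasviel/control_v11f1e_sd15_tile")
assert cn.config.cross_attention_dim == 768   # SD 1.x text-embedding width
assert list(cn.config.block_out_channels) == [320, 640, 1280, 1280]
```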
    	
diffusion_pytorch_model.bin ADDED

version https://git-lfs.github.com/spec/v1
oid sha256:eb05b4c3665bd76dad70a90652014a9b3aab391abd8a5bb484e860330f9492fb
size 1445255176
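
The three lines above are a Git LFS pointer: the actual weights live in LFS storage, and `oid` is the SHA-256 of the payload. A hedged sketch for verifying a downloaded copy against the pointer (the hash and repo id are taken from this page):

```python
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download("lllyasviel/control_v11f1e_sd15_tile",
                       "diffusion_pytorch_model.bin")

# Stream the 1.4 GB file in 1 MiB chunks to keep memory flat.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == "eb05b4c3665bd76dad70a90652014a9b3aab391abd8a5bb484e860330f9492fb"
```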
    	
images/original.png ADDED

images/output.png ADDED (stored with Git LFS)

sd.png ADDED