ericmorrison committed
Commit 60551e3
1 parent: 28362e2

Initial commit with folder contents

Files changed (7):
  1. .gitattributes +0 -35
  2. .gitmodules +8 -0
  3. README.md +12 -0
  4. pyproject.toml +19 -0
  5. requirements.txt +2 -0
  6. src/main.py +48 -0
  7. src/pipeline.py +33 -0
.gitattributes CHANGED
@@ -1,35 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitmodules ADDED
@@ -0,0 +1,8 @@
+ [submodule "newdream-sdxl-20"]
+     path = models/newdream-sdxl-20
+     url = https://huggingface.co/stablediffusionapi/newdream-sdxl-20
+     branch = main
+ [submodule "sdxl-lcmlora-1024-100k-3000steps"]
+     path = models/sdxl-lcmlora-1024-100k-3000steps
+     url = https://huggingface.co/mhussainahmad/sdxl-lcmlora-1024-100k-3000steps
+     branch = main
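Both model directories are Git submodules pointing at Hugging Face repositories, so a fresh clone needs `git submodule update --init` (plus Git LFS for the weight files) before anything exists under `models/`; `load_pipeline` in `src/pipeline.py` loads those paths with `local_files_only=True` and will fail otherwise.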
README.md ADDED
@@ -0,0 +1,12 @@
+ # edge-maxxing-newdream-sdxl
+
+ This holds the baseline for the SDXL NVIDIA GeForce RTX 4090 contest, which can be forked freely and optimized.
+
+ Some recommendations are as follows:
+ - Declare dependencies, including git dependencies, in pyproject.toml
+ - Include compiled models directly in the repository (rather than compiling them at load time); loading time matters far more than file size
+ - Avoid changing `src/main.py`, as it mostly contains protocol logic. Most changes should go in `models` and `src/pipeline.py`
+ - Change `requirements.txt` to add extra arguments to be used when installing the package
+
+ For testing, you need a Docker container with PyTorch and Ubuntu 22.04.
+ Install the listed dependencies with `pip install -r requirements.txt -e .`, then run `start_inference`.
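A note on that install command: `pip install -r requirements.txt -e .` does two jobs in one invocation. The `-r` file contributes extra pip options and packages (here just `peft`), while `-e .` installs this project in editable mode, resolving the pinned dependencies from `pyproject.toml` and exposing the `start_inference` console script.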
pyproject.toml ADDED
@@ -0,0 +1,19 @@
+ [build-system]
+ requires = ["setuptools >= 61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "edge-maxxing-4090-newdream"
+ description = "An edge-maxxing model submission for the 4090 newdream contest"
+ requires-python = ">=3.10,<3.11"
+ version = "1.0.0"
+ dependencies = [
+     "diffusers==0.30.2",
+     "transformers==4.41.2",
+     "accelerate==0.31.0",
+     "omegaconf==2.3.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines",
+ ]
+
+ [project.scripts]
+ start_inference = "main:main"
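The `[project.scripts]` table is what makes `start_inference` available as a command after installation: it points at the `main()` function in `src/main.py`. With setuptools >= 61, automatic discovery of the src layout should expose `main` and `pipeline` as top-level modules, which is presumably why the entry point is written as `main:main` with no package prefix.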
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ # Specify any extra options here, like --find-links, --pre, etc. Avoid specifying dependencies here; declare them in pyproject.toml instead
+ peft
src/main.py ADDED
@@ -0,0 +1,48 @@
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod
+ from os.path import abspath
+ from pathlib import Path
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def main():
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print("Pipeline loaded")
+
+     print(f"Creating socket at '{SOCKET}'")
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline)
+
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
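Since `src/main.py` contains only the socket protocol, a small client is enough to exercise a running server end to end. The sketch below is hypothetical and not part of the submission: it assumes `TextToImageRequest` is a pydantic v2 model (consistent with the `model_validate_json` call above) whose optional fields such as `negative_prompt`, `width`, `height`, and `seed` have defaults.

```python
# Hypothetical test client for the protocol in src/main.py (not part of the repo).
from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image
from pipelines.models import TextToImageRequest

# main.py creates the socket at the repository root; adjust if running elsewhere
SOCKET = "inferences.sock"

with Client(SOCKET) as connection:
    request = TextToImageRequest(prompt="an astronaut riding a horse", seed=42)

    # The server expects UTF-8 encoded JSON and replies with raw JPEG bytes
    connection.send_bytes(request.model_dump_json().encode("utf-8"))
    image = Image.open(BytesIO(connection.recv_bytes()))

image.save("output.jpg")
```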
src/pipeline.py ADDED
@@ -0,0 +1,33 @@
+ import torch
+ from PIL.Image import Image
+ from diffusers import StableDiffusionXLPipeline, LCMScheduler
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+
+
+ def load_pipeline() -> StableDiffusionXLPipeline:
+     pipeline = StableDiffusionXLPipeline.from_pretrained(
+         "./models/newdream-sdxl-20",
+         torch_dtype=torch.float16,
+         local_files_only=True,
+     ).to("cuda")
+     pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
+     pipeline.load_lora_weights("./models/sdxl-lcmlora-1024-100k-3000steps")
+
+     pipeline(prompt="")
+
+     return pipeline
+
+
+ def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
+     generator = Generator(pipeline.device).manual_seed(request.seed) if request.seed is not None else None
+
+     return pipeline(
+         prompt=request.prompt,
+         negative_prompt=request.negative_prompt,
+         width=request.width,
+         height=request.height,
+         generator=generator,
+         num_inference_steps=4,
+         guidance_scale=1.5,
+     ).images[0]
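The pipeline swaps in `LCMScheduler` and applies the LCM-LoRA weights, which is why `num_inference_steps=4` and `guidance_scale=1.5` work here: both values sit in the usual operating range for LCM-LoRA sampling, and the empty-prompt call in `load_pipeline` is a warm-up pass so the first real request is not slowed by kernel initialization. The `is not None` guard on the seed ensures an explicit seed of 0 is still honored. For a quick smoke test of the pipeline without the socket server, something like the following should work; the request fields mirror the ones `infer` reads, and the values chosen are illustrative assumptions.

```python
# Hypothetical standalone smoke test for src/pipeline.py (not part of the repo).
# Run from the repository root after `pip install -e .` so the relative
# model paths in load_pipeline resolve.
from pipelines.models import TextToImageRequest

from pipeline import load_pipeline, infer

pipeline = load_pipeline()

request = TextToImageRequest(
    prompt="a watercolor painting of a lighthouse at dusk",
    negative_prompt="blurry, low quality",
    width=1024,
    height=1024,
    seed=0,
)

infer(request, pipeline).save("smoke_test.jpg")
```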