3v324v23 committed on
Commit e98258e
1 Parent(s): 4b8e5c2
Files changed (6)
  1. .gitmodules +4 -0
  2. README.md +12 -0
  3. install_args.txt +1 -0
  4. pyproject.toml +19 -0
  5. src/main.py +48 -0
  6. src/pipeline.py +29 -0
.gitmodules ADDED
@@ -0,0 +1,4 @@
+ [submodule "newdream-sdxl-20"]
+ path = models/newdream-sdxl-20
+ url = https://huggingface.co/stablediffusionapi/newdream-sdxl-20
+ branch = main
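
Because the model weights arrive as a git submodule, a fresh clone needs `git submodule update --init` before the pipeline can load. A small illustrative guard, not part of this commit; it assumes the submodule contains a diffusers-format pipeline, whose root directory holds a `model_index.json`:

```python
# Illustrative only: fail fast if the weights submodule was never initialized.
# Assumes a diffusers-format pipeline, which keeps a model_index.json at its root.
from pathlib import Path

MODEL_DIR = Path("models/newdream-sdxl-20")

if not (MODEL_DIR / "model_index.json").exists():
    raise FileNotFoundError(
        f"{MODEL_DIR} looks empty; run `git submodule update --init` first"
    )
```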
README.md ADDED
@@ -0,0 +1,12 @@
+ # edge-maxxing-newdream-sdxl
+
+ This holds the baseline for the SDXL NVIDIA GeForce RTX 4090 contest, which can be forked freely and optimized.
+
+ Some recommendations are as follows:
+ - Dependencies, including git dependencies, should be declared in `pyproject.toml`
+ - Compiled models should be included directly in the repository (rather than compiled during loading); loading time matters far more than file size
+ - Avoid changing `src/main.py`, as it contains mostly protocol logic. Most changes should go in `models` and `src/pipeline.py`
+ - Change `install_args.txt` to add `pip install` arguments to be used when installing the package
+
+ For testing, you need a Docker container with PyTorch and Ubuntu 22.04.
+ Install the listed dependencies with `pip install $(cat install_args.txt) -e .`, then run `start_inference`.
install_args.txt ADDED
@@ -0,0 +1 @@
+
pyproject.toml ADDED
@@ -0,0 +1,19 @@
+ [build-system]
+ requires = ["setuptools >= 61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "edge-maxxing-4090-newdream"
+ description = "An edge-maxxing model submission for the 4090 newdream contest"
+ requires-python = ">=3.10,<3.11"
+ version = "1.0.0"
+ dependencies = [
+     "diffusers==0.30.2",
+     "transformers==4.41.2",
+     "accelerate==0.31.0",
+     "omegaconf==2.3.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines",
+ ]
+
+ [project.scripts]
+ start_inference = "main:main"
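
The `start_inference` entry point resolves to the `main` function in `src/main.py`, assuming setuptools' automatic discovery picks up `src/main.py` as a top-level `main` module, which the src layout supports. A sketch of what the generated console script amounts to, for illustration only:

```python
# Roughly what `start_inference` does once the package is installed
# (a sketch; setuptools generates the actual wrapper script).
from main import main  # src/main.py, importable via the src layout

main()
```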
src/main.py ADDED
@@ -0,0 +1,48 @@
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod
+ from os.path import abspath
+ from pathlib import Path
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def main():
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print("Pipeline loaded")
+
+     print(f"Creating socket at '{SOCKET}'")
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline)
+
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
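
For context, `src/main.py` speaks a simple framed byte protocol over the Unix socket: each request is a JSON-encoded `TextToImageRequest` sent with `send_bytes`, and each reply is the raw JPEG bytes. A minimal client sketch under those assumptions; it further assumes that `TextToImageRequest` fields other than `prompt` are optional, which `pipeline.py` suggests but does not guarantee:

```python
# A hypothetical client for the inference socket, mirroring the protocol in
# src/main.py. Assumes TextToImageRequest fields other than `prompt` are optional.
from multiprocessing.connection import Client

from pipelines.models import TextToImageRequest

SOCKET = "inferences.sock"  # created next to src/ by start_inference


def request_image(prompt: str) -> bytes:
    with Client(SOCKET) as connection:
        request = TextToImageRequest(prompt=prompt)

        # main.py parses the payload with model_validate_json, so send JSON bytes
        connection.send_bytes(request.model_dump_json().encode("utf-8"))

        # the reply is the JPEG produced by image.save(...) in main.py
        return connection.recv_bytes()


if __name__ == "__main__":
    with open("out.jpg", "wb") as file:
        file.write(request_image("an astronaut riding a horse"))
```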
src/pipeline.py ADDED
@@ -0,0 +1,29 @@
+ import torch
+ from PIL.Image import Image
+ from diffusers import StableDiffusionXLPipeline
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+
+
+ def load_pipeline() -> StableDiffusionXLPipeline:
+     pipeline = StableDiffusionXLPipeline.from_pretrained(
+         "./models/newdream-sdxl-20",
+         torch_dtype=torch.float16,
+         local_files_only=True,
+     ).to("cuda")
+
+     pipeline(prompt="")
+
+     return pipeline
+
+
+ def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
+     generator = Generator(pipeline.device).manual_seed(request.seed) if request.seed else None
+
+     return pipeline(
+         prompt=request.prompt,
+         negative_prompt=request.negative_prompt,
+         width=request.width,
+         height=request.height,
+         generator=generator,
+     ).images[0]
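
Two details worth noting: `from_pretrained` points at the submodule path with `local_files_only=True`, so nothing is downloaded at load time, and the empty `pipeline(prompt="")` call is a warmup run, presumably so CUDA kernel compilation and memory allocation are paid for during loading rather than on the first scored request. Since the README directs optimizations to this file, here is a sketch of the sort of change that fits; the scheduler swap and memory-format tweak are illustrative assumptions, not the baseline's method or a contest recommendation:

```python
# A sketch of optimizations one might try in src/pipeline.py; the specific
# choices below (scheduler swap, channels-last) are illustrative assumptions.
import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline


def load_pipeline() -> StableDiffusionXLPipeline:
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        "./models/newdream-sdxl-20",
        torch_dtype=torch.float16,
        local_files_only=True,
    ).to("cuda")

    # Example: trade scheduler choice against step count and output quality.
    pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
        pipeline.scheduler.config
    )

    # Example: channels-last often speeds up fp16 convolutions on recent GPUs.
    pipeline.unet.to(memory_format=torch.channels_last)

    # Keep the baseline's warmup so the first scored request hits warm kernels.
    pipeline(prompt="")

    return pipeline
```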