from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
import subprocess
import os
import json
import uuid
import torch
from diffusers import (
    StableDiffusionPipeline,
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
)
app = FastAPI()


@app.get("/generate")
def generate_image(prompt: str, model: str):
    # Free any GPU memory cached by earlier requests
    torch.cuda.empty_cache()

    # The model query parameter is expected as "<model_name>,<model_version>"
    modelArray = model.split(",")
    modelName = modelArray[0]
    modelVersion = modelArray[1]

    # Load the requested Stable Diffusion pipeline in half precision,
    # swap in the Euler discrete scheduler, and move it to the GPU
    pipeline = StableDiffusionPipeline.from_pretrained(
        modelName, torch_dtype=torch.float16
    )
    pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
    pipeline = pipeline.to("cuda")

    # Generate a 512x512 image and save it under a unique filename
    image = pipeline(prompt, num_inference_steps=50, height=512, width=512).images[0]
    filename = str(uuid.uuid4()) + ".jpg"
    image.save(filename)
    # Metadata assertion that truepic-sign embeds into the signed image
    assertion = {
        "assertions": [
            {
                "label": "com.truepic.custom.ai",
                "data": {
                    "model_name": modelName,
                    "model_version": modelVersion,
                    "prompt": prompt,
                },
            }
        ]
    }
    json_object = json.dumps(assertion)

    # Initialize the truepic-sign file-system signer with the API key
    subprocess.check_output(
        [
            "./truepic-sign",
            "init",
            "file-system",
            "--api-key",
            os.environ.get("api_key"),
        ]
    )

    # Sign the image, attach the assertion, and write the result into ./static
    subprocess.check_output(
        [
            "./truepic-sign",
            "sign",
            filename,
            "--assertions",
            json_object,
            "--output",
            (os.getcwd() + "/static/" + filename),
        ]
    )

    return {"response": filename}

# Serve the front-end and the signed images from the ./static directory
app.mount("/", StaticFiles(directory="static", html=True), name="static")


@app.get("/")
def index() -> FileResponse:
    return FileResponse(path="/app/static/index.html", media_type="text/html")
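
# Example request (a sketch; assumes the app is started with e.g.
# `uvicorn main:app --host 0.0.0.0 --port 8000` and that the model is passed as
# "<model_name>,<model_version>", e.g. "stabilityai/stable-diffusion-2-1,2.1"):
#
#   curl "http://localhost:8000/generate?prompt=an%20astronaut%20riding%20a%20horse&model=stabilityai/stable-diffusion-2-1,2.1"
#
# The response is JSON such as {"response": "<uuid>.jpg"}; the signed image is then
# served at /<uuid>.jpg through the static mount above.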