<!DOCTYPE html>
<html>
<head> | |
<meta charset="UTF-8"> | |
<title>Real-Time Latent Consistency Model ControlNet</title> | |
<meta name="viewport" content="width=device-width, initial-scale=1.0"> | |
<script | |
src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script> | |
<script src="https://cdn.jsdelivr.net/npm/[email protected]/piexif.min.js"></script> | |
<script src="https://cdn.tailwindcss.com"></script> | |
<style type="text/tailwindcss"> | |
.button { | |
@apply bg-gray-700 hover:bg-gray-800 text-white font-normal p-2 rounded disabled:bg-gray-300 dark:disabled:bg-gray-700 disabled:cursor-not-allowed dark:disabled:text-black | |
} | |
</style> | |
<script type="module"> | |
// Reads the current value of the form control matched by the given CSS
// selector; checkboxes report their checked state, other inputs their value.
const getValue = (selector) => {
    const field = document.querySelector(selector);
    return field.type === "checkbox" ? field.checked : field.value;
}
// Cache the static UI elements the app reads and updates.
const $ = (selector) => document.querySelector(selector);
const startBtn = $("#start");
const stopBtn = $("#stop");
const videoEl = $("#webcam");
const imageEl = $("#player");
const queueSizeEl = $("#queue_size");
const errorEl = $("#error");
const snapBtn = $("#snap");
const webcamsEl = $("#webcams");
function LCMLive(webcamVideo, liveImage) {
    // Active WebSocket for the current session; undefined until start() runs.
    let websocket;

    /**
     * Opens the /ws WebSocket and starts streaming webcam frames.
     * The returned promise settles when the session ends: it resolves with a
     * { status } object on disconnect or server timeout, and rejects on a
     * socket error or a server-reported error message.
     */
    async function start() {
        return new Promise((resolve, reject) => {
            const websocketURL = `${window.location.protocol === "https:" ? "wss" : "ws"
                }:${window.location.host}/ws`;
            const socket = new WebSocket(websocketURL);
            socket.onopen = () => {
                console.log("Connected to websocket");
            };
            socket.onclose = () => {
                console.log("Disconnected from websocket");
                stop();
                resolve({ "status": "disconnected" });
            };
            socket.onerror = (err) => {
                console.error(err);
                reject(err);
            };
            socket.onmessage = (event) => {
                const data = JSON.parse(event.data);
                switch (data.status) {
                    case "success":
                        break;
                    case "start": {
                        // Server granted a slot: begin pushing frames for this user.
                        const userId = data.userId;
                        initVideoStream(userId);
                        break;
                    }
                    case "timeout":
                        stop();
                        resolve({ "status": "timeout" });
                        // BUGFIX: the missing break fell through into "error",
                        // calling stop() twice and rejecting a resolved promise.
                        break;
                    case "error":
                        stop();
                        reject(data.message);
                }
            };
            websocket = socket;
        })
    }

    // Re-opens the camera stream using the device selected in the #webcams
    // dropdown, pausing frame uploads while the stream is swapped.
    function switchCamera() {
        const constraints = {
            audio: false,
            video: { width: 1024, height: 1024, deviceId: mediaDevices[webcamsEl.value].deviceId }
        };
        navigator.mediaDevices
            .getUserMedia(constraints)
            .then((mediaStream) => {
                webcamVideo.removeEventListener("timeupdate", videoTimeUpdateHandler);
                webcamVideo.srcObject = mediaStream;
                webcamVideo.onloadedmetadata = () => {
                    webcamVideo.play();
                    webcamVideo.addEventListener("timeupdate", videoTimeUpdateHandler);
                };
            })
            .catch((err) => {
                console.error(`${err.name}: ${err.message}`);
            });
    }

    // Fires on every video "timeupdate": center-crops the current frame to the
    // selected dimensions, encodes it as JPEG, and sends the frame followed by
    // the current generation parameters over the websocket.
    async function videoTimeUpdateHandler() {
        const dimension = getValue("input[name=dimension]:checked");
        const [WIDTH, HEIGHT] = JSON.parse(dimension);
        const canvas = new OffscreenCanvas(WIDTH, HEIGHT);
        const videoW = webcamVideo.videoWidth;
        const videoH = webcamVideo.videoHeight;
        const aspectRatio = WIDTH / HEIGHT;
        const ctx = canvas.getContext("2d");
        // Crop a centered region of the source frame matching the target aspect ratio.
        ctx.drawImage(webcamVideo, videoW / 2 - videoH * aspectRatio / 2, 0, videoH * aspectRatio, videoH, 0, 0, WIDTH, HEIGHT)
        const blob = await canvas.convertToBlob({ type: "image/jpeg", quality: 1 });
        websocket.send(blob);
        websocket.send(JSON.stringify({
            "seed": getValue("#seed"),
            "prompt": getValue("#prompt"),
            "guidance_scale": getValue("#guidance-scale"),
            "strength": getValue("#strength"),
            "steps": getValue("#steps"),
            "lcm_steps": getValue("#lcm_steps"),
            "width": WIDTH,
            "height": HEIGHT,
            "controlnet_scale": getValue("#controlnet_scale"),
            "controlnet_start": getValue("#controlnet_start"),
            "controlnet_end": getValue("#controlnet_end"),
            "canny_low_threshold": getValue("#canny_low_threshold"),
            "canny_high_threshold": getValue("#canny_high_threshold"),
            "debug_canny": getValue("#debug_canny")
        }));
    }

    // Cameras discovered by enumerateDevices(); indexed by the #webcams <select>.
    let mediaDevices = [];

    // Points the output <img> at this user's MJPEG stream, populates the
    // camera dropdown, and opens the first available camera.
    async function initVideoStream(userId) {
        liveImage.src = `/stream/${userId}`;
        await navigator.mediaDevices.enumerateDevices()
            .then(devices => {
                const cameras = devices.filter(device => device.kind === 'videoinput');
                mediaDevices = cameras;
                webcamsEl.innerHTML = "";
                cameras.forEach((camera, index) => {
                    const option = document.createElement("option");
                    option.value = index;
                    option.innerText = camera.label;
                    webcamsEl.appendChild(option);
                    option.selected = index === 0;
                });
                webcamsEl.addEventListener("change", switchCamera);
            })
            .catch(err => {
                console.error(err);
            });
        // NOTE(review): assumes at least one camera exists; mediaDevices[0]
        // would throw on camera-less devices — confirm against target clients.
        const constraints = {
            audio: false,
            video: { width: 1024, height: 1024, deviceId: mediaDevices[0].deviceId }
        };
        navigator.mediaDevices
            .getUserMedia(constraints)
            .then((mediaStream) => {
                webcamVideo.srcObject = mediaStream;
                webcamVideo.onloadedmetadata = () => {
                    webcamVideo.play();
                    webcamVideo.addEventListener("timeupdate", videoTimeUpdateHandler);
                };
            })
            .catch((err) => {
                console.error(`${err.name}: ${err.message}`);
            });
    }

    // Closes the socket, releases camera tracks, and detaches event handlers.
    async function stop() {
        // BUGFIX: guard the close — Stop can be pressed (or beforeunload fire)
        // before any session ever opened a socket.
        websocket?.close();
        navigator.mediaDevices.getUserMedia({ video: true }).then((mediaStream) => {
            mediaStream.getTracks().forEach((track) => track.stop());
        });
        webcamVideo.removeEventListener("timeupdate", videoTimeUpdateHandler);
        webcamsEl.removeEventListener("change", switchCamera);
        webcamVideo.srcObject = null;
    }
    return {
        start,
        stop
    }
}
/**
 * Shows a transient status banner in #error for two seconds.
 * @param {"error"|"success"} type - selects the message text and colors.
 */
function toggleMessage(type) {
    errorEl.hidden = false;
    errorEl.scrollIntoView();
    // Clear any previous color classes so repeated calls don't accumulate.
    errorEl.classList.remove("bg-red-300", "text-red-900", "bg-green-300", "text-green-900");
    switch (type) {
        case "error":
            // BUGFIX: message said "To many"; toggle(token, force) treated the
            // second class as a boolean force flag and never applied it.
            errorEl.innerText = "Too many users are using the same GPU, please try again later.";
            errorEl.classList.add("bg-red-300", "text-red-900");
            break;
        case "success":
            errorEl.innerText = "Your session has ended, please start a new one.";
            errorEl.classList.add("bg-green-300", "text-green-900");
            break;
    }
    setTimeout(() => {
        errorEl.hidden = true;
    }, 2000);
}
// Saves the current output frame as a JPEG download, embedding the
// generation parameters in the file's EXIF metadata.
function snapImage() {
    try {
        // Build the EXIF payload describing how the image was generated.
        const zeroth = {};
        const exif = {};
        const gps = {};
        zeroth[piexif.ImageIFD.Make] = "LCM Image-to-Image ControlNet";
        zeroth[piexif.ImageIFD.ImageDescription] = `prompt: ${getValue("#prompt")} | seed: ${getValue("#seed")} | guidance_scale: ${getValue("#guidance-scale")} | strength: ${getValue("#strength")} | controlnet_start: ${getValue("#controlnet_start")} | controlnet_end: ${getValue("#controlnet_end")} | lcm_steps: ${getValue("#lcm_steps")} | steps: ${getValue("#steps")}`;
        zeroth[piexif.ImageIFD.Software] = "https://github.com/radames/Real-Time-Latent-Consistency-Model";
        exif[piexif.ExifIFD.DateTimeOriginal] = new Date().toISOString();
        const exifObj = { "0th": zeroth, "Exif": exif, "GPS": gps };
        const exifBytes = piexif.dump(exifObj);
        // Re-encode the streamed frame as a JPEG data URL so the EXIF block
        // can be inserted (piexif only supports JPEG data).
        const canvas = document.createElement("canvas");
        canvas.width = imageEl.naturalWidth;
        canvas.height = imageEl.naturalHeight;
        const ctx = canvas.getContext("2d");
        ctx.drawImage(imageEl, 0, 0);
        const dataURL = canvas.toDataURL("image/jpeg");
        const withExif = piexif.insert(exifBytes, dataURL);
        // Trigger the download via a transient anchor element.
        const a = document.createElement("a");
        a.href = withExif;
        // BUGFIX: the data is JPEG, so the extension must be .jpg (was .png).
        a.download = `lcm_txt_2_img${Date.now()}.jpg`;
        a.click();
    } catch (err) {
        console.log(err);
    }
}
// Wire the UI controls to a single live-session instance.
const lcmLive = LCMLive(videoEl, imageEl);

// Start button: disable itself while a session runs; a "timeout" result
// means the server ended the session normally.
startBtn.addEventListener("click", async () => {
    try {
        startBtn.disabled = true;
        snapBtn.disabled = false;
        const res = await lcmLive.start();
        startBtn.disabled = false;
        if (res.status === "timeout") {
            toggleMessage("success");
        }
    } catch (err) {
        console.log(err);
        toggleMessage("error");
        startBtn.disabled = false;
    }
});
stopBtn.addEventListener("click", () => lcmLive.stop());
window.addEventListener("beforeunload", () => lcmLive.stop());
snapBtn.addEventListener("click", snapImage);

// Poll the server every 5 s so the shared-GPU queue size stays current.
setInterval(async () => {
    try {
        const res = await fetch("/queue_size");
        const data = await res.json();
        queueSizeEl.innerText = data.queue_size;
    } catch (err) {
        console.log(err);
    }
}, 5000);
</script> | |
</head> | |
<body class="text-black dark:bg-gray-900 dark:text-white"> | |
<div class="fixed right-2 top-2 p-4 font-bold text-sm rounded-lg max-w-xs text-center" id="error"> | |
</div> | |
<main class="container mx-auto px-4 py-4 max-w-4xl flex flex-col gap-4"> | |
<article class="text-center max-w-xl mx-auto"> | |
<h1 class="text-3xl font-bold">Real-Time Latent Consistency Model</h1> | |
<h2 class="text-2xl font-bold mb-4">ControlNet</h2> | |
<p class="text-sm"> | |
This demo showcases | |
<a href="https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7" target="_blank" | |
class="text-blue-500 underline hover:no-underline">LCM</a> Image to Image pipeline | |
using | |
<a href="https://github.com/huggingface/diffusers/tree/main/examples/community#latent-consistency-pipeline" | |
target="_blank" class="text-blue-500 underline hover:no-underline">Diffusers</a> with a MJPEG | |
stream server. | |
</p> | |
<p class="text-sm"> | |
There are <span id="queue_size" class="font-bold">0</span> user(s) sharing the same GPU, affecting | |
real-time performance. Maximum queue size is 4. <a | |
href="https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model?duplicate=true" | |
target="_blank" class="text-blue-500 underline hover:no-underline">Duplicate</a> and run it on your | |
own GPU. | |
</p> | |
</article> | |
<div> | |
<h2 class="font-medium">Prompt</h2> | |
<p class="text-sm text-gray-500"> | |
Change the prompt to generate different images, accepts <a | |
href="https://github.com/damian0815/compel/blob/main/doc/syntax.md" target="_blank" | |
class="text-blue-500 underline hover:no-underline">Compel</a> syntax. | |
</p> | |
<div class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center"> | |
<textarea type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 outline-none dark:text-black" | |
title="Prompt, this is an example, feel free to modify" | |
                    placeholder="Add your prompt here...">Portrait of The Terminator with glare pose, detailed, intricate, full of colour, cinematic lighting, trending on artstation, 8k, hyperrealistic, focused, extreme details, unreal engine 5, cinematic, masterpiece</textarea>
</div> | |
</div> | |
<div class=""> | |
<details> | |
<summary class="font-medium cursor-pointer">Advanced Options</summary> | |
<div class="grid grid-cols-3 sm:grid-cols-6 items-center gap-3 py-3"> | |
<label for="webcams" class="text-sm font-medium">Camera Options: </label> | |
<select id="webcams" class="text-sm border-2 border-gray-500 rounded-md font-light dark:text-black"> | |
</select> | |
<div></div> | |
<label class="text-sm font-medium " for="steps">Inference Steps | |
</label> | |
<input type="range" id="steps" name="steps" min="1" max="20" value="4" | |
oninput="this.nextElementSibling.value = Number(this.value)"> | |
<output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> | |
4</output> | |
<!-- --> | |
<label class="text-sm font-medium" for="lcm_steps">LCM Inference Steps | |
</label> | |
<input type="range" id="lcm_steps" name="lcm_steps" min="2" max="60" value="50" | |
oninput="this.nextElementSibling.value = Number(this.value)"> | |
<output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> | |
50</output> | |
<!-- --> | |
<label class="text-sm font-medium" for="guidance-scale">Guidance Scale | |
</label> | |
<input type="range" id="guidance-scale" name="guidance-scale" min="0" max="30" step="0.001" | |
value="8.0" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"> | |
<output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> | |
8.0</output> | |
<!-- --> | |
<label class="text-sm font-medium" for="strength">Strength</label> | |
<input type="range" id="strength" name="strength" min="0.1" max="1" step="0.001" value="0.50" | |
oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"> | |
<output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> | |
0.5</output> | |
<!-- --> | |
<label class="text-sm font-medium" for="controlnet_scale">ControlNet Condition Scale</label> | |
<input type="range" id="controlnet_scale" name="controlnet_scale" min="0.0" max="1" step="0.001" | |
value="0.80" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"> | |
<output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> | |
0.8</output> | |
<!-- --> | |
<label class="text-sm font-medium" for="controlnet_start">ControlNet Guidance Start</label> | |
<input type="range" id="controlnet_start" name="controlnet_start" min="0.0" max="1.0" step="0.001" | |
value="0.0" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"> | |
<output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> | |
0.0</output> | |
<!-- --> | |
<label class="text-sm font-medium" for="controlnet_end">ControlNet Guidance End</label> | |
<input type="range" id="controlnet_end" name="controlnet_end" min="0.0" max="1.0" step="0.001" | |
value="1.0" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"> | |
<output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> | |
1.0</output> | |
<!-- --> | |
<label class="text-sm font-medium" for="canny_low_threshold">Canny Low Threshold</label> | |
<input type="range" id="canny_low_threshold" name="canny_low_threshold" min="0.0" max="1.0" | |
step="0.001" value="0.1" | |
oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"> | |
<output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> | |
0.1</output> | |
<!-- --> | |
<label class="text-sm font-medium" for="canny_high_threshold">Canny High Threshold</label> | |
<input type="range" id="canny_high_threshold" name="canny_high_threshold" min="0.0" max="1.0" | |
step="0.001" value="0.2" | |
oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"> | |
<output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> | |
0.2</output> | |
<!-- --> | |
<label class="text-sm font-medium" for="seed">Seed</label> | |
<input type="number" id="seed" name="seed" value="299792458" | |
class="font-light border border-gray-700 text-right rounded-md p-2 dark:text-black"> | |
<button | |
onclick="document.querySelector('#seed').value = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER)" | |
class="button"> | |
Rand | |
</button> | |
<!-- --> | |
<!-- --> | |
<label class="text-sm font-medium" for="dimension">Image Dimensions</label> | |
<div class="col-span-2 flex gap-2"> | |
<div class="flex gap-1"> | |
<input type="radio" id="dimension512" name="dimension" value="[512,512]" checked | |
class="cursor-pointer"> | |
<label for="dimension512" class="text-sm cursor-pointer">512x512</label> | |
</div> | |
<div class="flex gap-1"> | |
                            <input type="radio" id="dimension768" name="dimension" value="[768,768]"
                                class="cursor-pointer">
<label for="dimension768" class="text-sm cursor-pointer">768x768</label> | |
</div> | |
</div> | |
<!-- --> | |
<!-- --> | |
<label class="text-sm font-medium" for="debug_canny">Debug Canny</label> | |
<div class="col-span-2 flex gap-2"> | |
<input type="checkbox" id="debug_canny" name="debug_canny" class="cursor-pointer"> | |
<label for="debug_canny" class="text-sm cursor-pointer"></label> | |
</div> | |
<div></div> | |
<!-- --> | |
</div> | |
</details> | |
</div> | |
<div class="flex gap-3"> | |
<button id="start" class="button"> | |
Start | |
</button> | |
<button id="stop" class="button"> | |
Stop | |
</button> | |
<button id="snap" disabled class="button ml-auto"> | |
Snapshot | |
</button> | |
</div> | |
<div class="relative rounded-lg border border-slate-300 overflow-hidden"> | |
<img id="player" class="w-full aspect-square rounded-lg" | |
src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII="> | |
<div class="absolute top-0 left-0 w-1/4 aspect-square"> | |
<video id="webcam" class="w-full aspect-square relative z-10 object-cover" playsinline autoplay muted | |
loop></video> | |
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 448" width="100" | |
class="w-full p-4 absolute top-0 opacity-20 z-0"> | |
<path fill="currentColor" | |
d="M224 256a128 128 0 1 0 0-256 128 128 0 1 0 0 256zm-45.7 48A178.3 178.3 0 0 0 0 482.3 29.7 29.7 0 0 0 29.7 512h388.6a29.7 29.7 0 0 0 29.7-29.7c0-98.5-79.8-178.3-178.3-178.3h-91.4z" /> | |
</svg> | |
</div> | |
</div> | |
</main> | |
</body> | |
</html> |