<script lang="ts">
  import { onMount } from 'svelte';
  import type { FieldProps, PipelineInfo } from '$lib/types';
  import { PipelineMode } from '$lib/types';
  import ImagePlayer from '$lib/components/ImagePlayer.svelte';
  import VideoInput from '$lib/components/VideoInput.svelte';
  import Button from '$lib/components/Button.svelte';
  import PipelineOptions from '$lib/components/PipelineOptions.svelte';
  import Spinner from '$lib/icons/spinner.svelte';
  import { lcmLiveStatus, lcmLiveActions, LCMLiveStatus } from '$lib/lcmLive';
  import { mediaStreamActions, onFrameChangeStore } from '$lib/mediaStream';
  import { getPipelineValues, deboucedPipelineValues as debouncedPipelineValues } from '$lib/store';

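  // Pipeline metadata, parameter schema, and queue state fetched from the backend.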
  let pipelineParams: FieldProps[];
  let pipelineInfo: PipelineInfo;
  let isImageMode: boolean = false;
  let maxQueueSize: number = 0;
  let currentQueueSize: number = 0;
  onMount(() => {
    getSettings();
  });

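  // Fetch pipeline settings from the backend: the input parameter schema, pipeline info,
  // and the maximum queue size. Starts polling the queue size when a limit is configured.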
  async function getSettings() {
    const settings = await fetch('/settings').then((r) => r.json());
    pipelineParams = Object.values(settings.input_params.properties);
    pipelineInfo = settings.info.properties;
    isImageMode = pipelineInfo.input_mode.default === PipelineMode.IMAGE;
    maxQueueSize = settings.max_queue_size;
    pipelineParams = pipelineParams.filter((e) => e?.disabled !== true);
    if (maxQueueSize > 0) {
      getQueueSize();
      setInterval(() => {
        getQueueSize();
      }, 2000);
    }
  }
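  // Poll the backend for the number of users currently waiting for the shared GPU.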
  async function getQueueSize() {
    const data = await fetch('/queue_size').then((r) => r.json());
    currentQueueSize = data.queue_size;
  }

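  // Build the payload sent with each frame: in image mode the current pipeline values
  // plus the latest webcam frame blob, otherwise only the debounced pipeline values.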
  function getStreamdata() {
    if (isImageMode) {
      return [getPipelineValues(), $onFrameChangeStore?.blob];
    } else {
      return [$debouncedPipelineValues];
    }
  }

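  // The stream counts as running unless the websocket is disconnected.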
  $: isLCMRunning = $lcmLiveStatus !== LCMLiveStatus.DISCONNECTED;

  let disabled = false;
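  // Start or stop the live stream; in image mode the webcam is started and stopped
  // alongside the LCM connection.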
  async function toggleLcmLive() {
    if (!isLCMRunning) {
      if (isImageMode) {
        await mediaStreamActions.enumerateDevices();
        await mediaStreamActions.start();
      }
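      // Keep the button disabled while the connection is being established.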
      disabled = true;
      await lcmLiveActions.start(getStreamdata);
      disabled = false;
    } else {
      if (isImageMode) {
        mediaStreamActions.stop();
      }
      lcmLiveActions.stop();
    }
  }
</script>

<main class="container mx-auto flex max-w-5xl flex-col gap-3 px-4 py-4">
  <article class="text-center">
    <h1 class="text-3xl font-bold">Real-Time Latent Consistency Model</h1>
    {#if pipelineInfo?.title?.default}
      <h3 class="text-xl font-bold">{pipelineInfo?.title?.default}</h3>
    {/if}
    <p class="text-sm">
      This demo showcases the
      <a
        href="https://huggingface.co/blog/lcm_lora"
        target="_blank"
        class="text-blue-500 underline hover:no-underline">LCM LoRA</a
      >
      Image to Image pipeline using
      <a
        href="https://huggingface.co/docs/diffusers/main/en/using-diffusers/lcm#performing-inference-with-lcm"
        target="_blank"
        class="text-blue-500 underline hover:no-underline">Diffusers</a
      > with an MJPEG stream server.
    </p>
    <p class="text-sm text-gray-500">
      Change the prompt to generate different images; it accepts <a
        href="https://github.com/damian0815/compel/blob/main/doc/syntax.md"
        target="_blank"
        class="text-blue-500 underline hover:no-underline">Compel</a
      > syntax.
    </p>
    {#if maxQueueSize > 0}
      <p class="text-sm">
        There are <span id="queue_size" class="font-bold">{currentQueueSize}</span>
        user(s) sharing the same GPU, which affects real-time performance. The maximum queue size is {maxQueueSize}.
        <a
          href="https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model?duplicate=true"
          target="_blank"
          class="text-blue-500 underline hover:no-underline">Duplicate</a
        > and run it on your own GPU.
      </p>
    {/if}
  </article>
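  <!-- main layout: webcam input (image mode only), output player, start/stop control, and pipeline options -->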
  {#if pipelineParams}
    <article class="my-3 grid grid-cols-1 gap-3 sm:grid-cols-2">
      {#if isImageMode}
        <div class="sm:col-start-1">
          <VideoInput></VideoInput>
        </div>
      {/if}
      <div class={isImageMode ? 'sm:col-start-2' : 'col-span-2'}>
        <ImagePlayer />
      </div>
      <div class="sm:col-span-2">
        <Button on:click={toggleLcmLive} {disabled} classList={'text-lg my-1 p-2'}>
          {#if isLCMRunning}
            Stop
          {:else}
            Start
          {/if}
        </Button>
        <PipelineOptions {pipelineParams}></PipelineOptions>
      </div>
    </article>
  {:else}
    <!-- loading -->
    <div class="flex items-center justify-center gap-3 py-48 text-2xl">
      <Spinner classList={'animate-spin opacity-50'}></Spinner>
      <p>Loading...</p>
    </div>
  {/if}
</main>

<style lang="postcss">
  :global(html) {
    @apply text-black dark:bg-gray-900 dark:text-white;
  }
</style>