Spaces:
Running
Running
sarahciston
committed on
Commit
•
f413c4d
1
Parent(s):
e7a7a42
add gradio client for image gen instead of fetch
Browse files
sketch.js
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
|
2 |
// IMPORT LIBRARIES TOOLS
|
3 |
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
|
|
|
4 |
|
5 |
// skip local model check
|
6 |
env.allowLocalModels = false;
|
@@ -39,31 +40,50 @@ async function post(request) {
|
|
39 |
}
|
40 |
}
|
41 |
|
42 |
-
async function textImgTask(
|
43 |
console.log('text-to-image task initiated')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
44 |
|
45 |
-
const URL = 'https://multimodalart-flux-1-merged.hf.space/call/infer'
|
46 |
-
const seed = 0
|
47 |
-
const randomizeSeed = true
|
48 |
-
const width = 1024
|
49 |
-
const height = 1024
|
50 |
-
const guidaneceScale = 3.5
|
51 |
-
const inferenceSteps = 8
|
52 |
|
53 |
-
const options = [ prompt[0], seed, randomizeSeed, width, height, guidaneceScale, inferenceSteps ]
|
54 |
|
55 |
-
const request = new Request(URL,{
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
})
|
60 |
|
61 |
-
let out = post(request)
|
62 |
|
63 |
-
console.log(out)
|
64 |
-
console.log("text-to-image task completed")
|
65 |
|
66 |
-
return
|
67 |
}
|
68 |
|
69 |
|
@@ -200,8 +220,8 @@ new p5(function (p5){
|
|
200 |
const inputDiv = p5.createDiv().class('module', 'main').id('inputDiv')
|
201 |
p5.createElement('h4', 'INPUT').parent(inputDiv)
|
202 |
p5.createElement('h3', 'Enter your prompt').class('header').parent(inputDiv)
|
203 |
-
p5.createP(`Write your prompt in the box below using one [BLANK] and one [MASK]
|
204 |
-
p5.createP(`e.g. Write "The [BLANK] was a [MASK]." and in the three blanks choose three occupations
|
205 |
p5.createP(`(This is taken from an actual example used to test GPT-3. (Brown et al. 2020, §6.2.1).)`).class('caption').parent(inputDiv)
|
206 |
promptField = p5.createInput(PROMPT_INPUT).parent(inputDiv) // turns the string into an input; now access the text via PROMPT_INPUT.value()
|
207 |
promptField.size(700)
|
@@ -258,7 +278,7 @@ new p5(function (p5){
|
|
258 |
// placeholder DIV for images and captions
|
259 |
p5.createElement('h3', 'Text-to-image output').parent(outputDiv)
|
260 |
outPics = p5.createDiv().id('outPics').parent(outputDiv)
|
261 |
-
|
262 |
|
263 |
// print info about model, prompt, and hyperparams
|
264 |
p5.createElement('h3', 'Prompting info').parent(outputDiv)
|
@@ -302,7 +322,7 @@ new p5(function (p5){
|
|
302 |
|
303 |
let outPic = await textImgTask(promptArray)
|
304 |
console.log(outPic[1])
|
305 |
-
p5.createImage(outPic
|
306 |
}
|
307 |
|
308 |
p5.draw = function(){
|
|
|
1 |
|
2 |
// IMPORT LIBRARIES TOOLS
|
3 |
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
|
4 |
+
import { Client } from "@gradio/client";
|
5 |
|
6 |
// skip local model check
|
7 |
env.allowLocalModels = false;
|
|
|
40 |
}
|
41 |
}
|
42 |
|
43 |
// Generate an image from a text prompt by calling the hosted
// "multimodalart/FLUX.1-merged" Gradio Space's /infer endpoint.
//
// @param {string} input - the text prompt to render
// @returns {Promise<*>} the first element of the prediction's data array
//   (presumably the generated image payload/URL — confirm against the
//   Space's API docs; the caller passes it to an image element)
// @throws network/connection errors from Client.connect / client.predict
async function textImgTask(input){
  console.log('text-to-image task initiated')

  const MODEL = "multimodalart/FLUX.1-merged"

  // Connect to the Space, then run inference with fixed hyperparameters.
  const client = await Client.connect(MODEL);
  const result = await client.predict("/infer", {
    prompt: input,
    seed: 0,
    randomize_seed: true,     // let the Space pick a fresh seed each call
    width: 256,               // small output keeps hosted inference fast
    height: 256,
    guidance_scale: 1,
    num_inference_steps: 1,   // single step: quick preview-quality image
  });

  console.log(result.data);

  // The Space returns an array; the image result is the first element.
  const OUT = result.data[0]

  return OUT
}
|
88 |
|
89 |
|
|
|
220 |
const inputDiv = p5.createDiv().class('module', 'main').id('inputDiv')
|
221 |
p5.createElement('h4', 'INPUT').parent(inputDiv)
|
222 |
p5.createElement('h3', 'Enter your prompt').class('header').parent(inputDiv)
|
223 |
+
p5.createP(`Write your prompt in the box below using one [BLANK] and one [MASK]`).parent(inputDiv)
|
224 |
+
p5.createP(`e.g. Write "The [BLANK] was a [MASK]." and in the three blanks choose three occupations`).parent(inputDiv)
|
225 |
p5.createP(`(This is taken from an actual example used to test GPT-3. (Brown et al. 2020, §6.2.1).)`).class('caption').parent(inputDiv)
|
226 |
promptField = p5.createInput(PROMPT_INPUT).parent(inputDiv) // turns the string into an input; now access the text via PROMPT_INPUT.value()
|
227 |
promptField.size(700)
|
|
|
278 |
// placeholder DIV for images and captions
|
279 |
p5.createElement('h3', 'Text-to-image output').parent(outputDiv)
|
280 |
outPics = p5.createDiv().id('outPics').parent(outputDiv)
|
281 |
+
|
282 |
|
283 |
// print info about model, prompt, and hyperparams
|
284 |
p5.createElement('h3', 'Prompting info').parent(outputDiv)
|
|
|
322 |
|
323 |
let outPic = await textImgTask(promptArray)
|
324 |
console.log(outPic[1])
|
325 |
+
p5.createImage(outPic).parent('#outputDiv')
|
326 |
}
|
327 |
|
328 |
p5.draw = function(){
|