File size: 11,648 Bytes
ceb472c
 
a083558
f413c4d
4189f9e
8993dbb
4189f9e
 
ceb472c
a02510b
76061be
a8f0395
76061be
3af77c3
8cfe734
de353b9
8cfe734
76061be
8cfe734
4189f9e
9e5d3d1
 
 
 
 
 
 
 
 
 
 
6122de9
 
 
 
 
 
 
 
 
 
 
f413c4d
c32ec2d
f413c4d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c32ec2d
f413c4d
 
 
 
 
 
 
76061be
f413c4d
76061be
f413c4d
 
 
 
 
76061be
f413c4d
76061be
f413c4d
 
76061be
f413c4d
76061be
 
 
 
b23bba4
 
76061be
f235b40
b23bba4
372a0c2
8c1ca46
76061be
c32ec2d
 
eef4c4d
76061be
 
 
 
 
eef4c4d
 
76061be
9e2a81c
372a0c2
 
a083558
 
0b9db9e
8c46069
 
 
 
 
a6f115b
 
8c46069
 
0b9db9e
98ddae8
8c46069
1b6e6cc
0b9db9e
98ddae8
1fd0967
1b6e6cc
8cfe734
8c1ca46
1b6e6cc
8cfe734
8c1ca46
 
8cfe734
372a0c2
8cfe734
 
 
 
8c1ca46
 
 
 
 
 
a083558
 
f0ff9a3
 
 
 
 
8c1ca46
8cfe734
8c1ca46
b23bba4
 
 
 
 
eb4bd74
b23bba4
 
eb800fb
2988cff
 
8c46069
 
 
4d4ea35
6ff171e
eb800fb
855a94a
ceb472c
b23bba4
4d4ea35
 
b23bba4
 
 
 
 
6ff171e
b23bba4
2988cff
 
b23bba4
 
6ff171e
855a94a
60949e3
 
 
 
 
 
7c5c6b0
 
 
60949e3
 
7c5c6b0
961ae7a
7c5c6b0
a04b9b1
b397845
a04b9b1
1765b74
b397845
 
 
60949e3
 
7c5c6b0
961ae7a
 
7c5c6b0
f413c4d
 
a8f0395
7c5c6b0
8cfe734
7c5c6b0
8cfe734
9e2a81c
1765b74
7c5c6b0
 
 
2422cf3
 
 
 
 
 
 
 
 
 
 
 
9e2a81c
2422cf3
b397845
 
 
 
 
 
 
 
 
 
1765b74
b397845
7c5c6b0
 
 
 
 
 
 
 
 
961ae7a
 
7c5c6b0
961ae7a
7c5c6b0
961ae7a
de353b9
7c5c6b0
961ae7a
 
de353b9
f413c4d
961ae7a
 
 
de353b9
2988cff
 
7c5c6b0
a083558
4fbd4b0
c4835d8
 
4fbd4b0
8cfe734
 
8f87f71
2628cff
52d92f9
76061be
7b64471
ab1373b
 
 
22a8a4d
76061be
7b64471
 
2628cff
7b64471
76061be
 
a02510b
3960a3b
 
76061be
 
 
3960a3b
8f87f71
3960a3b
de353b9
76061be
 
6122de9
f413c4d
60949e3
2e68462
7c5c6b0
 
96b4bd1
2e68462
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332

// IMPORT LIBRARIES TOOLS
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
import { Client } from "@gradio/client";

// skip local model check
env.allowLocalModels = false;

// GLOBAL VARIABLES

let promptArray = [] // prompts produced by filling each blank value into the template
const PREPROMPT = `Please continue each sentence, filling in [MASK] with your own words:`

let PROMPT_INPUT = `` // a field for writing or changing a text value
let promptField // an html element to hold the prompt
let outText, outPics, outInfo // html elements to hold the results
let blanksArray = [] // stores the input fields whose values modify the prompt

// e.g. ["woman", "man", "non-binary person"]

// // RUN IMAGE CAPTIONER //// W-I-P
// async function captionTask(prompt){
//   // PICK MODEL 
//   let MODEL = 'Xenova/vit-gpt2-image-captioning'
//   const pipe = await pipeline("image-to-text", MODEL)

//   const out = await pipe(prompt)

//   out = JSON.stringify(out, null, 2)
// }

// GENERIC API CALL HANDLING
/**
 * Send a fetch request and parse the JSON response.
 * @param {Request|string} request - a Request object (or URL string) to fetch
 * @returns {Promise<any|undefined>} the parsed JSON body, or undefined when
 *   the request or parsing fails (errors are logged, not rethrown)
 */
async function post(request) {
  try {
    const response = await fetch(request);
    const result = await response.json();
    console.log("Success:", result);
    return result; // previously the result was logged but never returned to callers
  } catch (error) {
    console.error("Error:", error);
    return undefined; // best-effort: keep the original non-throwing behavior
  }
}

// RUN TEXT-TO-IMAGE MODEL
/**
 * Send a prompt to a hosted text-to-image model via the Gradio client.
 * @param {string} input - prompt text for the image model
 * @returns the first entry of the model's result data (the generated image payload)
 */
async function textImgTask(input){
  console.log('text-to-image task initiated')

  const MODEL = "multimodalart/FLUX.1-merged"
  const PROMPT = input

  // connect to the hosted space and request a single small, fast image
  const client = await Client.connect(MODEL)
  const result = await client.predict("/infer", {
    prompt: PROMPT,
    seed: 0,
    randomize_seed: true,
    width: 256,
    height: 256,
    guidance_scale: 1,
    num_inference_steps: 1,
  })

  console.log(result.data)

  // the space returns a list; the image payload is the first entry
  return result.data[0]
}



// RUN TEXT-GEN MODEL
/**
 * Run the combined preprompt + prompt variations through a local text2text model.
 * @param {string} pre - instruction text placed before the prompts
 * @param {string[]} prompts - prompt variations for the model to complete
 * @returns {Promise<string>} the model's generated text for the combined input
 */
async function textGenTask(pre, prompts){
  console.log('text-gen task initiated')

  // Combine the preprompt and all prompt variations into one input string.
  // (The original `pre.concat(prompts).toString()` produced a string, not a
  // list, despite its comment — this is the same value, written explicitly.)
  // NOTE(review): prompts are comma-separated and no separator follows `pre`;
  // TODO confirm whether a space/newline after the preprompt improves results.
  const INPUT = pre + prompts.join(',')
  console.log(INPUT)

  // PICK MODEL — other transformers.js-compatible checkpoints tried:
  // - Xenova/bloom-560m
  // - Xenova/distilgpt2
  // - Xenova/LaMini-Cerebras-256M
  // - Xenova/gpt-neo-125M // not working well
  // - Xenova/llama2.c-stories15M // only fairytails
  // - webml/TinyLlama-1.1B-Chat-v1.0
  // - Xenova/TinyLlama-1.1B-Chat-v1.0
  // - Xenova/flan-alpaca-large // text2text
  const MODEL = 'Xenova/flan-alpaca-large'

  // flan-alpaca is a text2text model (use 'text-generation' for GPT-style models)
  const pipe = await pipeline('text2text-generation', MODEL)

  // hyperparameters controlling output length and repetition
  // (others tried: temperature, do_sample, no_repeat_ngram_size, num_return_sequences)
  const hyperparameters = { max_new_tokens: 300, top_k: 30, repetition_penalty: 1.5 }

  // RUN INPUT THROUGH MODEL
  const out = await pipe(INPUT, hyperparameters)

  console.log(out)
  console.log('text-gen task completed')

  // text2text pipelines return [{ generated_text }] — take the single result
  // (chat-style models would instead need out.choices[i].message.content)
  const OUTPUT = out[0].generated_text

  console.log(OUTPUT)
  console.log('text-gen parsing complete')

  return OUTPUT
}

// RUN FILL-IN MODEL
/**
 * Run a fill-mask model over the input and collect the completed sentences.
 * @param {string} input - prompt containing a [MASK] token for the model to fill
 * @returns {Promise<string[]>} the full predicted sequences, best match first
 */
async function fillInTask(input){
  console.log('fill-in task initiated')

  // MODELS LIST
  // - Xenova/bert-base-uncased
  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');

  const out = await pipe(input);
  console.log(out) // yields { score, sequence, token, token_str } for each result

  // keep only the completed sentence from each prediction
  // (the original `await out.forEach(...)` awaited undefined — forEach returns
  // nothing; map expresses the intent directly)
  const OUTPUT_LIST = out.map(o => o.sequence)

  console.log(OUTPUT_LIST)
  console.log('fill-in task completed')

  return OUTPUT_LIST
}

//// p5.js Instance

new p5(function (p5){
  p5.setup = function(){
      p5.noCanvas()
      console.log('p5 instance loaded')
      makeTextModules()
      makeInputModule()
      makeOutputModule()
    }

  // Build the intro and instructions text sections.
  function makeTextModules(){
    const introDiv = p5.createDiv().class('module').id('intro')
    p5.createElement('h1','p5.js Critical AI Prompt Battle').parent(introDiv)
    p5.createP(`What do AI models really 'know' about you — about your community, your language, your culture? What do they 'know' about different concepts, ideas, and worldviews?`).parent(introDiv)
    p5.createP(`This tool lets you compare the results of multiple AI-generated texts and images side-by-side, using blanks you fill in to explore variations on a single prompt. For more info on prompt programming and critical AI, see <A href="">[TUTORIAL-LINK]</a>.`).parent(introDiv)

    // NOTE(review): instructDiv is created but the headers below are parented
    // to introDiv, not instructDiv — confirm which container is intended
    const instructDiv = p5.createDiv().id('instructions').parent(introDiv)
    p5.createElement('h4', 'INSTRUCTIONS').class('header').parent(introDiv)
    p5.createP(`Write your prompt using [BLANK] and [MASK], where [BLANK] will be the variation you choose and fill in below, and [MASK] is a variation that the model will complete.`).parent(introDiv)
    p5.createP(`For best results, try to phrase your prompt so that [BLANK] and [MASK] highlight the qualities you want to investigate. See <A href="">[EXAMPLES]</a>`).parent(introDiv)
  }

  // Build the prompt field, the three blank fields, and the run button.
  function makeInputModule(){
    const inputDiv = p5.createDiv().class('module', 'main').id('inputDiv')
    p5.createElement('h4', 'INPUT').parent(inputDiv)
    p5.createElement('h3', 'Enter your prompt').class('header').parent(inputDiv)
    p5.createP(`Write your prompt in the box below using one [BLANK] and one [MASK]`).parent(inputDiv)
    p5.createP(`e.g. Write "The [BLANK] was a [MASK]." and in the three blanks choose three occupations`).parent(inputDiv)
    p5.createP(`(This is taken from an actual example used to test GPT-3. (Brown et al. 2020, §6.2.1).)`).class('caption').parent(inputDiv)
    promptField = p5.createInput(PROMPT_INPUT).parent(inputDiv) // access the text via promptField.value()
    promptField.size(700)
    p5.createP(promptField.attribute('label')).parent(inputDiv)
    promptField.addClass("prompt")

    p5.createElement('h3', 'Fill in your blanks').class('header').parent(inputDiv)
    p5.createP('Add three words or phrases in the boxes below that will replace the [BLANK] in your prompt when the model runs.').parent(inputDiv)
    p5.createP('(e.g. doctor, secretary, circus performer)').parent(inputDiv)

    // pass the container explicitly — previously addField read `inputDiv` from
    // outside its scope, which only worked via the browser's implicit
    // id-to-global-variable mapping
    addField(inputDiv)
    addField(inputDiv)
    addField(inputDiv)

    // press to run model
    const submitButton = p5.createButton("RUN PROMPT")
    submitButton.size(170)
    submitButton.class('button').parent(inputDiv)
    submitButton.mousePressed(displayOutput)
  }

  // Create one blank input field inside the given container and track it.
  function addField(container){
    const f = p5.createInput("").parent(container)
    f.class("blank")
    blanksArray.push(f)
    console.log("made variable field")
  }

  // Build placeholder elements that displayOutput() will fill in later.
  function makeOutputModule(){
    const outputDiv = p5.createDiv().class('module').id('outputDiv')
    p5.createElement('h4',"OUTPUT").parent(outputDiv)

    // text-only output
    p5.createElement('h3', 'Text output').parent(outputDiv)
    outText = p5.createP('').id('outText').parent(outputDiv)

    // placeholder DIV for images and captions
    p5.createElement('h3', 'Text-to-image output').parent(outputDiv)
    outPics = p5.createDiv().id('outPics').parent(outputDiv)

    // print info about model, prompt, and hyperparams
    p5.createElement('h3', 'Prompting info').parent(outputDiv)
    outInfo = p5.createP('').id('outInfo').parent(outputDiv)
  }

  // Run the models with the current prompt + blanks and display the results.
  async function displayOutput(){
    console.log('submitButton pressed')

    // insert waiting dots into results space of interface
    outText.html('...', false)

    // GRAB CURRENT FIELD INPUTS FROM PROMPT & BLANKS
    PROMPT_INPUT = promptField.value() // grab update to the prompt if it's been changed
    console.log("latest prompt: ", PROMPT_INPUT)
    console.log(blanksArray)

    // collect the current values of the blanks fields
    const blanksValues = blanksArray.map(b => b.value())
    console.log(blanksValues)

    // reset before refilling so repeated button presses don't accumulate
    // prompts from earlier runs
    promptArray.length = 0
    blanksValues.forEach(b => {
      // replace the [BLANK] segment with each variation to make a new prompt
      promptArray.push(PROMPT_INPUT.replace('[BLANK]', b))
    })
    console.log(promptArray)

    const outs = await textGenTask(PREPROMPT, promptArray)
    console.log(outs)

    // insert the model outputs into the paragraph (false replaces, true appends)
    outText.html(outs, false)

    // textImgTask expects a single prompt string, not the whole array
    const outPic = await textImgTask(promptArray.toString())
    console.log(outPic)
    // Gradio image payloads are typically { url, path, ... } — TODO confirm
    const src = outPic?.url ?? outPic
    // createImg (not createImage) inserts an <img> element into the DOM;
    // p5.createImage(w, h) only makes a blank offscreen p5.Image
    p5.createImg(src, 'AI-generated image').parent('#outputDiv')
  }

  p5.draw = function(){
      // nothing to animate — all work happens in setup and event handlers
  }
});