// p5tutorial2 / sketch.js
// IMPORT LIBRARIES & TOOLS
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
// skip local model check
env.allowLocalModels = false;
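// (this tells transformers.js to fetch models from the Hugging Face Hub rather than looking for locally hosted copies)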
// GLOBAL VARIABLES
let blankArray = ["woman", "man"]
let PREPROMPT = `Please complete the phrase, replacing any [BLANK] with words from the ${blankArray} and replacing any [MASK] with your own words.`
// let PREPROMPT = `Please complete the phrase and fill in any [MASK]: `
let PROMPT_INPUT = `The [BLANK] has a job as a [MASK] but...` // a field for writing or changing a text value
let pField
let outText
// RUN TEXT-GEN MODEL
async function textGenTask(pre, prompt, blanks){
console.log('text-gen task initiated')
// Create a list of prompts, one for each word in the blanks list
let promptArray = []
blanks.forEach(b => {
let p = prompt.replace('[BLANK]', b) // swap the [BLANK] placeholder for the current word
promptArray.push(p)
})
console.log(promptArray)
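// with the default PROMPT_INPUT and blankArray this yields:
// ["The woman has a job as a [MASK] but...", "The man has a job as a [MASK] but..."]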
// preprompt not working, fix later if we do chat templates
// let INPUT = pre + prompt
// let INPUT = prompt
// PICK MODEL
let MODEL = 'Xenova/flan-alpaca-large'
// MODELS LIST
// - Xenova/bloom-560m
// - Xenova/distilgpt2
// - Xenova/LaMini-Cerebras-256M
// - Xenova/gpt-neo-125M // not working well
// - Xenova/llama2.c-stories15M // only fairytails
// - webml/TinyLlama-1.1B-Chat-v1.0
// - Xenova/TinyLlama-1.1B-Chat-v1.0
// - Xenova/flan-alpaca-large //text2text
// const pipe = await pipeline('text-generation', MODEL)
const pipe = await pipeline('text2text-generation', MODEL)
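// note: the first call downloads the model weights and caches them in the browser, so it can take a while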
var hyperparameters = { max_new_tokens: 60, top_k: 90, repetition_penalty: 1.5 }
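// max_new_tokens caps the length of each response, top_k samples from the 90 most likely tokens, repetition_penalty discourages repeated phrases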
let OUTPUT_LIST = [] // a blank array to store the results from the model
// use a for...of loop instead of forEach so each await finishes before the function returns
for (const p of promptArray) {
// RUN INPUT THROUGH MODEL
let out = await pipe(p, hyperparameters)
// other hyperparameter settings to try:
// max_new_tokens: 256, top_k: 50, temperature: 0.7, do_sample: true, no_repeat_ngram_size: 2,
// num_return_sequences: 2 (must be 1?)
console.log(out)
console.log('text-gen task completed')
// PARSE RESULTS as a list of outputs, two different ways depending on the model
// parsing of output
out.forEach(o => {
console.log(o)
OUTPUT_LIST.push(o.generated_text)
})
}
// alternate format for parsing, for chat model type
// await out.choices.forEach(o => {
// console.log(o)
// OUTPUT_LIST.push(o.message.content)
// })
console.log(OUTPUT_LIST)
console.log('text-gen parsing complete')
return OUTPUT_LIST
// return await out
}
// RUN FILL-IN MODEL
async function fillInTask(input){
console.log('fill-in task initiated')
// MODELS LIST
// - Xenova/bert-base-uncased
const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');
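// fill-mask predicts the most likely words for each [MASK] token and returns the top candidates with their scores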
let out = await pipe(input);
console.log(out) // yields { score, sequence, token, token_str } for each result
let OUTPUT_LIST = [] // a blank array to store the results from the model
// parsing of output
out.forEach(o => {
console.log(o) // yields { score, sequence, token, token_str } for each result
OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
})
console.log(OUTPUT_LIST)
console.log('fill-in task completed')
// return out
return OUTPUT_LIST
}
//// p5.js Instance
new p5(function (p5){
p5.setup = function(){
p5.noCanvas()
console.log('p5 instance loaded')
makeTextDisplay()
makeFields()
makeButtons()
}
p5.draw = function(){
//
}
function makeTextDisplay(){
let title = p5.createElement('h1','p5.js Critical AI Prompt Battle')
let intro = p5.createP(`This tool lets you explore the results of several AI prompts at once.`)
p5.createP(`Use it to explore what models 'know' about various concepts, communities, and cultures. For more information on prompt programming and critical AI, see [Tutorial & extra info][TO-DO][XXX]`)
}
function makeFields(){
pField = p5.createInput(PROMPT_INPUT) // turns the string into an input field; access the current text with pField.value()
pField.size(700)
pField.attribute('label', `Write a text prompt with one [BLANK] and one [MASK] that the model will fill in.`)
p5.createP(pField.attribute('label'))
pField.addClass("prompt")
}
function makeButtons(){
let submitButton = p5.createButton("SUBMIT")
submitButton.size(170)
submitButton.class('submit')
submitButton.mousePressed(displayResults)
// also make results placeholder
let outHeader = p5.createElement('h3',"Results")
outText = p5.createP('').id('results')
}
async function displayResults(){
console.log('submitButton pressed')
// insert waiting dots into results space of interface
outText.html('...', false)
PROMPT_INPUT = pField.value() // grab update to the prompt if it's been changed
console.log("latest prompt: ", PROMPT_INPUT)
// call the function that runs the model for the task of your choice here
// make sure to use the PROMPT_INPUT as a parameter, or also the PREPROMPT if valid for that task
let outs = await textGenTask(PREPROMPT, PROMPT_INPUT, blankArray)
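// let outs = await fillInTask(PROMPT_INPUT) // alternative: run the fill-mask task on the same prompt instead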
console.log(outs)
// insert the model outputs into the paragraph
outText.html(outs, false) // false replaces the existing text instead of appending to it
}
});