// IMPORT LIBRARIES & TOOLS
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';

// skip local model check
env.allowLocalModels = false;

// GLOBAL VARIABLES
let PROMPT_INPUT = `The woman has a job as a...` // the starting prompt text; updated from the input field below
let pField // will hold the p5 input field for writing or changing the prompt

// RUN TEXT-GEN MODEL
async function textGenTask(input){
  console.log('text-gen task initiated')

  const pipe = await pipeline('text-generation', 'Xenova/LaMini-Cerebras-256M')

  let out = await pipe(input, {
    // temperature: 2,
    // no_repeat_ngram_size: 2,
    // num_beams: 2,
    // num_return_sequences: 2,
    // repetition_penalty: 2,
    // min_new_tokens: 100,
    min_new_tokens: 50,
    max_new_tokens: 200
  })

  console.log(out)
  console.log('text-gen task completed')

  let OUTPUT_LIST = [] // a blank array to store the results from the model

  // parsing of output
  out.forEach(o => {
    console.log(o)
    OUTPUT_LIST.push(o.generated_text) // keep only the generated text from each result
  })

  console.log(OUTPUT_LIST)
  console.log('text-gen parsing complete')

  return OUTPUT_LIST
  // return await out
}

// RUN FILL-IN MODEL
async function fillInTask(input){
  console.log('fill-in task initiated')

  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');

  let out = await pipe(input);
  console.log(out) // yields { score, sequence, token, token_str } for each result

  let OUTPUT_LIST = [] // a blank array to store the results from the model

  // parsing of output
  out.forEach(o => {
    console.log(o) // yields { score, sequence, token, token_str } for each result
    OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
  })

  console.log(OUTPUT_LIST)
  // displayResults(await OUTPUT_LIST)
  console.log('fill-in task completed')

  // return await out
  return OUTPUT_LIST
}

// PROCESS MODEL OUTPUT
// a generic function to pass in different model task functions
// expects the task to resolve to the raw model results (objects with a .sequence), e.g. via `return out`
async function getOutputs(task){
  let output = await task

  let OUTPUT_LIST = [] // a blank array to store the results from the model

  output.forEach(o => {
    OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
  })

  console.log(OUTPUT_LIST)

  return OUTPUT_LIST
}

// await getOutputs(fillInTask()) // getOutputs will later connect to the interface to display results

//// p5.js Instance
new p5(function (p5){

  p5.setup = function(){
    p5.noCanvas()
    console.log('p5 instance loaded')
    makeTextDisplay()
    makeFields()
    makeButtons()
  }

  p5.draw = function(){
    //
  }

  function makeTextDisplay(){
    let title = p5.createElement('h1', 'p5.js Critical AI Prompt Battle')
    let intro = p5.createP(`This tool lets you explore several AI prompt results at once.`)
    p5.createP(`Use it to explore what models 'know' about various concepts, communities, and cultures.
    For more information on prompt programming and critical AI, see [Tutorial & extra info][TO-DO][XXX]`)
  }

  function makeFields(){
    pField = p5.createInput(PROMPT_INPUT) // turns the string into an input field; access the current text via pField.value()
    pField.size(700)
    pField.attribute('label', `Write a text prompt with one [MASK] that the model will fill in.`)
    p5.createP(pField.attribute('label'))
    pField.addClass("prompt")
    // pField.value(PROMPT_INPUT)
    // console.log(pField.value())
  }

  function makeButtons(){
    let submitButton = p5.createButton("SUBMIT")
    submitButton.size(170)
    submitButton.class('submit')
    submitButton.mousePressed(displayResults)
    let outHeader = p5.createElement('h3', "Results")
  }

  async function displayResults(){
    console.log('displayed, pressed')

    PROMPT_INPUT = pField.value() // updates the prompt if it has changed
    console.log("latest prompt: ", PROMPT_INPUT)

    // let fillIn = await fillInTask(PROMPT_INPUT)
    // let outs = await getOutputs(fillIn)
    let outs = await textGenTask(PROMPT_INPUT)
    console.log(outs)

    // text = str(outs)
    let outText = p5.createP('')
    outText.html(outs) // .html() replaces the element's contents; pass true as a second argument to append instead
  }
});
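
//// OPTIONAL: fill-in wiring (a sketch, not used above)
// A minimal sketch of how displayResults() could use the fill-mask path instead of
// text generation, as the commented-out lines inside displayResults() suggest.
// The name displayFillInResults is hypothetical; the code assumes the prompt holds
// exactly one [MASK] token (as the field label asks) and reuses fillInTask(), which
// already returns a list of completed sequences. To try it, define it inside the
// p5 instance above (so p5.createP is in scope) and pass it to
// submitButton.mousePressed() in place of displayResults.
//
// async function displayFillInResults(){
//   PROMPT_INPUT = pField.value()              // e.g. "The woman has a job as a [MASK]."
//   let outs = await fillInTask(PROMPT_INPUT)  // list of filled-in sequences
//   let outText = p5.createP('')
//   outText.html(outs.join('<br>'))            // one candidate sequence per line
// }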