// IMPORT LIBRARIES & TOOLS
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';

// skip local model check
env.allowLocalModels = false;

// GLOBAL VARIABLES
let PREPROMPT = `Please complete the phrase and fill in any [MASK]: `
let PROMPT_INPUT = `The woman has a job as a...` // a starting prompt; its text can be written or changed in the interface
let pField // input field for the prompt
let outText // paragraph element that will hold the results

// RUN TEXT-GEN MODEL
async function textGenTask(pre, prompt){
  console.log('text-gen task initiated')

  // preprompt not working yet; fix later if we add chat templates
  // let INPUT = pre + prompt
  let INPUT = prompt

  // PICK MODEL
  let MODEL = 'Xenova/OpenELM-270M-Instruct'

  // MODELS LIST
  // - Xenova/bloom-560m
  // - Xenova/distilgpt2
  // - Xenova/LaMini-Cerebras-256M
  // - Xenova/gpt-neo-125M
  // - Xenova/OpenELM-270M-Instruct
  // - Xenova/llama2.c-stories15M
  // - webml/TinyLlama-1.1B-Chat-v1.0
  // - Xenova/TinyLlama-1.1B-Chat-v1.0
  // - Xenova/stablelm-2-zephyr-1_6b
  // - Felladrin/onnx-Llama-160M-Chat-v1

  const pipe = await pipeline('text-generation', MODEL)

  // RUN INPUT THROUGH MODEL
  let out = await pipe(INPUT, { max_new_tokens: 60, top_k: 90, repetition_penalty: 1.5 })

  // other hyperparameter settings to try:
  // max_new_tokens: 256, top_k: 50, temperature: 0.7, do_sample: true, no_repeat_ngram_size: 2

  console.log(out)
  console.log('text-gen task completed')

  // PARSE RESULTS as a list of outputs, two different ways depending on the model

  let OUTPUT_LIST = [] // a blank array to store the results from the model

  // parsing of output
  out.forEach(o => {
    console.log(o)
    OUTPUT_LIST.push(o.generated_text)
  })

  // alternate format for parsing, for chat model type
  // out.choices.forEach(o => {
  //   console.log(o)
  //   OUTPUT_LIST.push(o.message.content)
  // })

  console.log(OUTPUT_LIST)
  console.log('text-gen parsing complete')

  return OUTPUT_LIST
  // return out
}

// RUN FILL-IN MODEL
async function fillInTask(input){
  console.log('fill-in task initiated')

  // MODELS LIST
  // - Xenova/bert-base-uncased

  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');

  let out = await pipe(input);

  console.log(out) // yields { score, sequence, token, token_str } for each result

  let OUTPUT_LIST = [] // a blank array to store the results from the model

  // parsing of output
  out.forEach(o => {
    console.log(o) // yields { score, sequence, token, token_str } for each result
    OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
  })

  console.log(OUTPUT_LIST)
  console.log('fill-in task completed')

  // return out
  return OUTPUT_LIST
}

//// p5.js Instance
new p5(function (p5){

  p5.setup = function(){
    p5.noCanvas()
    console.log('p5 instance loaded')
    makeTextDisplay()
    makeFields()
    makeButtons()
  }

  p5.draw = function(){
    //
  }

  function makeTextDisplay(){
    let title = p5.createElement('h1','p5.js Critical AI Prompt Battle')
    let intro = p5.createP(`This tool lets you explore several AI prompt results at once.`)
    p5.createP(`Use it to explore what models 'know' about various concepts, communities, and cultures. 
      For more information on prompt programming and critical AI, see [Tutorial & extra info][TO-DO][XXX]`)
  }

  function makeFields(){
    pField = p5.createInput(PROMPT_INPUT) // turns the starting string into an input field; access the current text with pField.value()
    pField.size(700)
    pField.attribute('label', `Write a text prompt with one [MASK] that the model will fill in.`)
    p5.createP(pField.attribute('label'))
    pField.addClass("prompt")
  }

  function makeButtons(){
    let submitButton = p5.createButton("SUBMIT")
    submitButton.size(170)
    submitButton.class('submit')
    submitButton.mousePressed(displayResults)

    // also make results placeholder
    let outHeader = p5.createElement('h3',"Results")
    outText = p5.createP('').id('results')
  }

  async function displayResults(){
    console.log('submitButton pressed')

    // insert waiting dots into results space of interface
    outText.html('...', false)

    PROMPT_INPUT = pField.value() // grab update to the prompt if it's been changed
    console.log("latest prompt: ", PROMPT_INPUT)

    // call the function that runs the model for the task of your choice here
    // pass PROMPT_INPUT as a parameter, plus PREPROMPT if the task uses it
    let outs = await textGenTask(PREPROMPT, PROMPT_INPUT)
    console.log(outs)

    // insert the model outputs into the results paragraph
    outText.html(outs, false) // false replaces the existing html instead of appending to it
  }
});
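
// --- OPTIONAL SKETCH: CHAT-TEMPLATE VERSION OF THE TEXT-GEN TASK ---
// The comments in textGenTask note that PREPROMPT is unused until chat templates are wired up.
// The function below is a minimal, untested sketch of how that could look: it combines the
// preprompt and the prompt as system/user messages and formats them with the tokenizer's chat
// template before generation. It is an illustration, not part of the original tutorial:
// apply_chat_template may require a newer release of @xenova/transformers than the 2.10.1
// pinned above, and the model named here is just one chat-tuned option from the MODELS LIST.
// (This import could also be merged into the import statement at the top of the sketch.)
import { AutoTokenizer } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.10.1';

async function chatGenTask(pre, prompt){
  console.log('chat-gen task initiated')

  const MODEL = 'Xenova/TinyLlama-1.1B-Chat-v1.0'

  // combine the preprompt and the prompt as chat messages
  const messages = [
    { role: 'system', content: pre },
    { role: 'user', content: prompt }
  ]

  // render the messages into the prompt format the chat model expects
  const tokenizer = await AutoTokenizer.from_pretrained(MODEL)
  const INPUT = tokenizer.apply_chat_template(messages, { tokenize: false, add_generation_prompt: true })

  const pipe = await pipeline('text-generation', MODEL)
  const out = await pipe(INPUT, { max_new_tokens: 60, repetition_penalty: 1.5 })

  // same parsing as textGenTask: keep only the generated text from each result
  return out.map(o => o.generated_text)
}

// To try it, swap textGenTask(PREPROMPT, PROMPT_INPUT) for chatGenTask(PREPROMPT, PROMPT_INPUT)
// in displayResults() above.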