sarahciston committed on
Commit
8c46069
1 Parent(s): f44657d

try Xenova/OpenELM-270M-Instruct model

Browse files
Files changed (1) hide show
  1. sketch.js +17 -2
sketch.js CHANGED
@@ -21,9 +21,21 @@ async function textGenTask(pre, prompt){
21
  let INPUT = prompt
22
 
23
  // PICK MODEL
24
- let MODEL = 'Xenova/llama2.c-stories15M'
25
- // const = modelsList = ['Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0', 'Xenova/distilgpt2', 'Xenova/bloom-560m']
 
 
 
 
 
 
 
 
 
 
 
26
 
 
27
  const pipe = await pipeline('text-generation', MODEL)
28
 
29
  // RUN INPUT THROUGH MODEL,
@@ -61,6 +73,9 @@ async function textGenTask(pre, prompt){
61
  async function fillInTask(input){
62
  console.log('fill-in task initiated')
63
 
 
 
 
64
  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');
65
 
66
  var out = await pipe(input);
 
21
  let INPUT = prompt
22
 
23
  // PICK MODEL
24
+ let MODEL = 'Xenova/OpenELM-270M-Instruct'
25
+
26
+ // MODELS LIST
27
+ // - Xenova/bloom-560m
28
+ // - Xenova/distilgpt2
29
+ // - Xenova/LaMini-Cerebras-256M
30
+ // - Xenova/gpt-neo-125M
31
+ // - Xenova/OpenELM-270M-Instruct
32
+ // - Xenova/llama2.c-stories15M
33
+ // - webml/TinyLlama-1.1B-Chat-v1.0
34
+ // - Xenova/TinyLlama-1.1B-Chat-v1.0
35
+ // - Xenova/stablelm-2-zephyr-1_6b
36
+ // - Felladrin/onnx-Llama-160M-Chat-v1
37
 
38
+
39
  const pipe = await pipeline('text-generation', MODEL)
40
 
41
  // RUN INPUT THROUGH MODEL,
 
73
  async function fillInTask(input){
74
  console.log('fill-in task initiated')
75
 
76
+ // MODELS LIST
77
+ // - Xenova/bert-base-uncased
78
+
79
  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');
80
 
81
  var out = await pipe(input);