Update app.py
app.py CHANGED

@@ -47,22 +47,22 @@ model_name = st.sidebar.radio("Model (only use patching for\nsmall (<4L) models
 model = HookedTransformer.from_pretrained(model_name)

 def predict_next_token(prompt):
-    """
-    roneneldan/TinyStories-1M
-    roneneldan/TinyStories-3M
-    roneneldan/TinyStories-8M
-    roneneldan/TinyStories-28M
-    roneneldan/TinyStories-33M
-    roneneldan/TinyStories-1Layer-21M
-    roneneldan/TinyStories-2Layers-33M
-    roneneldan/TinyStories-Instruct-1M
-    roneneldan/TinyStories-Instruct-3M
-    roneneldan/TinyStories-Instruct-8M
-    roneneldan/TinyStories-Instruct-28M
-    roneneldan/TinyStories-Instruct-33M
-    roneneldan/TinyStories-Instuct-1Layer-21M
-    roneneldan/TinyStories-Instruct-2Layers-33M
-    """
+    """
+    roneneldan/TinyStories-1M
+    roneneldan/TinyStories-3M
+    roneneldan/TinyStories-8M
+    roneneldan/TinyStories-28M
+    roneneldan/TinyStories-33M
+    roneneldan/TinyStories-1Layer-21M
+    roneneldan/TinyStories-2Layers-33M
+    roneneldan/TinyStories-Instruct-1M
+    roneneldan/TinyStories-Instruct-3M
+    roneneldan/TinyStories-Instruct-8M
+    roneneldan/TinyStories-Instruct-28M
+    roneneldan/TinyStories-Instruct-33M
+    roneneldan/TinyStories-Instuct-1Layer-21M
+    roneneldan/TinyStories-Instruct-2Layers-33M
+    """
     logits = model(prompt)[0,-1]
     answer_index = logits.argmax()
     answer = model.tokenizer.decode(answer_index)
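For context, the hunk above sits inside a greedy next-token helper built on TransformerLens: the model's logits for the last prompt position are taken, argmax picks the most likely token id, and the tokenizer decodes it back to text. Below is a minimal standalone sketch of that pattern; the imports, the hard-coded example model ("roneneldan/TinyStories-33M" in place of the Streamlit-selected model_name), the return statement, and the example prompt are assumptions not shown in the diff.

# Standalone sketch of the greedy next-token helper shown in the diff.
# Assumptions not visible in the hunk: imports, the fixed model choice,
# the return statement, and the example call at the bottom.
from transformer_lens import HookedTransformer

model = HookedTransformer.from_pretrained("roneneldan/TinyStories-33M")

def predict_next_token(prompt):
    # model(prompt) returns logits of shape [batch, seq_len, d_vocab];
    # [0, -1] selects the logits at the final position of the single prompt.
    logits = model(prompt)[0, -1]
    # Greedy decoding: take the single most likely token id.
    answer_index = logits.argmax()
    # Convert the token id back into text.
    answer = model.tokenizer.decode(answer_index)
    return answer

print(predict_next_token("Once upon a time there was a little"))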