skip original prompt
README.md
CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Google Gemma 7b
-emoji:
+emoji: 🚀
 colorFrom: purple
 colorTo: gray
 sdk: gradio
app.py
CHANGED
@@ -1,10 +1,10 @@
 import gradio as gr
-
+import os
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
-
-tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
-model = AutoModelForCausalLM.from_pretrained("google/gemma-7b")
-streamer = TextStreamer(tokenizer)
+token = os.environ["HF_TOKEN"]
+tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b",token=token)
+model = AutoModelForCausalLM.from_pretrained("google/gemma-7b",token=token)
+streamer = TextStreamer(tokenizer,skip_prompt=True)


 def generate(inputs):
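
The change reads a Hugging Face access token from the HF_TOKEN environment variable (a Space secret) so the gated google/gemma-7b checkpoint can be downloaded, and switches the streamer to TextStreamer(tokenizer, skip_prompt=True) so the original prompt is not echoed in the streamed output, which is what the commit message "skip original prompt" refers to. The body of generate() is not shown in this diff; the following is only a minimal sketch of how these objects could be wired into a Gradio app. The generate() body, max_new_tokens=256, and the gr.Interface call are illustrative assumptions, not the Space's actual code.

# Hypothetical sketch only; the real app.py body is not visible in this diff.
import os

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer

# HF_TOKEN is a Space secret holding a Hugging Face access token for the gated model.
token = os.environ["HF_TOKEN"]

tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b", token=token)
model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", token=token)
# skip_prompt=True keeps the input prompt out of the streamed tokens.
streamer = TextStreamer(tokenizer, skip_prompt=True)


def generate(inputs):
    # Tokenize the user prompt and generate a continuation,
    # streaming tokens through the TextStreamer as they are produced.
    input_ids = tokenizer(inputs, return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, max_new_tokens=256, streamer=streamer)
    # Decode only the newly generated tokens so the prompt is not returned to the UI.
    return tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)


demo = gr.Interface(fn=generate, inputs="text", outputs="text")
demo.launch()

Note that TextStreamer prints tokens to stdout (the Space logs) as they are generated; the decoded string returned by generate() is what the Gradio interface displays.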