Update app.py
app.py CHANGED
@@ -6,16 +6,13 @@ from peft import PeftModel
 
 #pipe = pipeline("text-generation", model="furquan/opt_2_7_b_prompt_tuned_sentiment_analysis", trust_remote_code=True, cache_dir="/local/home/furquanh/myProjects/week12/").to('cuda')
 
-
-
-model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", token="hf_<redacted>")
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+tokenizer = AutoTokenizer.from_pretrained("furquan/opt-1-3b-prompt-tuned-sentiment-analysis", trust_remote_code=True)
+model = AutoModel.from_pretrained("furquan/opt-1-3b-prompt-tuned-sentiment-analysis", trust_remote_code=True)
 
-model = PeftModel.from_pretrained(model, "furquan/llama2-sentiment-prompt-tuned")
 
 
 title = "OPT-1.3B"
-description = "This demo uses meta's
+description = "This demo uses meta's OPT-1.3B Causal LM as base model that was prompt tuned on the Stanford Sentiment Treebank dataset to only output the sentiment of a given text."
 article = "<p style='text-align: center'><a href='https://arxiv.org/pdf/2104.08691.pdf' target='_blank'>The Power of Scale for Parameter-Efficient Prompt Tuning</a></p>"
 
 
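For context, a minimal sketch of how the newly loaded checkpoint might be queried for sentiment. The `classify_sentiment` helper and its generation settings are assumptions for illustration, not part of this commit; the commit loads the checkpoint with `AutoModel`, while the sketch assumes `AutoModelForCausalLM` so that `generate()` is available.

```python
# Minimal sketch (assumption): querying the prompt-tuned OPT-1.3B checkpoint.
# The commit only shows the model/tokenizer loading and the demo metadata;
# classify_sentiment and its generation settings are hypothetical.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "furquan/opt-1-3b-prompt-tuned-sentiment-analysis"
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)

def classify_sentiment(text: str) -> str:
    # A prompt-tuned checkpoint is assumed to apply its learned soft prompt
    # internally, so the input here is just the raw text to classify.
    inputs = tokenizer(text, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=3)
    # Decode only the newly generated tokens (the predicted sentiment label).
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

print(classify_sentiment("A thoroughly enjoyable film."))  # e.g. "positive"
```

A function of this shape is what the Space's Gradio interface would typically wrap: user text in, generated sentiment string out.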