alexkueck committed
Commit 56f16a2
1 Parent(s): 724c4ca

Update app.py

Files changed (1): app.py +33 -10
app.py CHANGED
@@ -1,13 +1,36 @@
-import gradio as gr
+import os
+from huggingface_hub import InferenceClient, login
+from transformers import AutoTokenizer
+from langchain.chat_models import ChatOpenAI

+# access token with permission to access the model and PRO subscription
+hf_token = "YOUR_HF_TOKEN"  # https://huggingface.co/settings/tokens
+login(token=hf_token)

-#demo = gr.load("mistralai/Mixtral-8x7B-Instruct-v0.1", src="models")
+# tokenizer for generating prompt
+tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-70b-chat-hf")

-description = "Test"
-title = "Test"
-interface = gr.Interface.load("mistralai/Mixtral-8x7B-Instruct-v0.1",
-                              description=description,
-                              title = title,
-                              examples=[["american robin"]]
-                              )
-interface.launch(share=True)
+# inference client
+client = InferenceClient("https://api-inference.huggingface.co/models/meta-llama/Llama-2-70b-chat-hf")
+
+# generate function
+def generate(text):
+    payload = tokenizer.apply_chat_template([{"role": "user", "content": text}], tokenize=False)
+    res = client.text_generation(
+        payload,
+        do_sample=True,
+        return_full_text=False,
+        max_new_tokens=2048,
+        top_p=0.9,
+        temperature=0.6,
+    )
+    return res.strip()
+
+# test client
+assert generate("What is 2+2?") == "The answer to 2+2 is 4."
+
+# create evaluator
+os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"  # https://platform.openai.com/account/api-keys
+assert os.environ.get("OPENAI_API_KEY") is not None, "Please set OPENAI_API_KEY environment variable"
+
+evaluation_llm = ChatOpenAI(model="gpt-4")
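
The diff stops right after constructing evaluation_llm, so nothing in this commit actually uses the GPT-4 judge yet. As a minimal sketch of the likely next step (the load_evaluator call and the "conciseness" criterion below are assumptions, not part of this commit), the evaluation model could be wired into LangChain's string-evaluation API to grade answers produced by generate():

from langchain.evaluation import load_evaluator

# assumed follow-up, not in this commit: use GPT-4 as an LLM judge
# for answers produced by the Llama-2 inference client defined above
evaluator = load_evaluator("criteria", criteria="conciseness", llm=evaluation_llm)

question = "What is 2+2?"
result = evaluator.evaluate_strings(prediction=generate(question), input=question)
print(result["score"], result["reasoning"])  # binary score plus the judge's explanation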