DakMak committed on
Commit
d80c38a
β€’
1 Parent(s): 12081a3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -0
app.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import gradio as gr
3
+ import openai
4
+ import torch
5
+ import os
6
+
7
# Azure OpenAI connection settings — every value is injected through
# environment variables (Space secrets), never hard-coded.
openai.api_type = os.getenv("api_type_key")
openai.api_base = os.getenv("api_base_key")
openai.api_version = os.getenv("api_version_key")
openai.api_key = os.getenv("api_cpu_key")

# Hub identifier of the underlying chat model (informational constant).
model_id_name = "openskyml/llama-7b-chat-hf-cpu"
14
def gptresponse(message, history):
    """Query the Azure OpenAI chat deployment with the full conversation.

    Parameters
    ----------
    message : str
        The newest user utterance; an empty string is not appended.
    history : list[tuple[str, str]]
        Prior (user, assistant) exchange pairs supplied by Gradio.

    Returns
    -------
    str
        The assistant's reply text extracted from the API response.
    """
    system_prompt = "You are LLaMA-cpu assistant. You are sodan and designed by Evgeniy Hristoforu in 2023. Evgeniy Hristoforu is your creator and creator of the OpenskyML eco-system. Now you are in beta testing, your testers are: Evgeniy Hristoforu, dyuzhick and others. You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."

    # Rebuild the whole transcript: system prompt, then each past turn.
    transcript = [{"role": "system", "content": system_prompt}]
    for user_turn, bot_turn in history:
        transcript.extend((
            {"role": "user", "content": user_turn},
            {"role": "assistant", "content": bot_turn},
        ))

    # Only forward a non-empty new message.
    if message != '':
        transcript.append({"role": "user", "content": message})

    completion = openai.ChatCompletion.create(
        engine="NGA_AI_ASSISTANT",
        messages=transcript,
        temperature=0.8,
        max_tokens=10000,
        top_p=0.95,
        frequency_penalty=1,
        presence_penalty=1,
        stop=None,
    )

    return completion["choices"][0]["message"]["content"]
37
+
38
# --- Gradio UI wiring -------------------------------------------------------

title = "πŸ•Š Chat with Pigeon"

# Base description shown under the title (same text as before, written as a
# single explicit literal instead of a continued triple-quoted string).
description = (
    "\n\n<p style='text-align: center'>πŸ’¬ This demo version of the chat "
    "supports 10.000 tokens and was developed by Evgeniy Hristoforu!</p>\n\n"
)

# Append a hardware badge depending on GPU availability.
description += (
    "\n<p style='text-align: center'>😐 Running on CPU!</p>"
    if not torch.cuda.is_available()
    else "\n<p style='text-align: center'>😎 Running on powerful hardware!</p>"
)

# Clickable starter prompts for the chat widget.
examples = [
    'Hello there! How are you doing?',
    'Can you explain briefly to me what is the Python programming language?',
    'Explain the plot of Cinderella in a sentence.',
    'How many hours does it take a man to eat a Helicopter?',
    "Write a 100-word article on 'Benefits of Open-Source in AI research'",
]

gr.ChatInterface(gptresponse, title=title, description=description, examples=examples).launch()