prithivMLmods committed on
Commit
e8bb21f
1 Parent(s): d8f2c61

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -57
app.py CHANGED
@@ -1,33 +1,14 @@
1
- import os
2
  import gradio as gr
3
- import google.generativeai as genai
4
- from dotenv import load_dotenv
5
- import time
6
-
7
- DESCRIPTIONx = """## GEMINI::GEN ♊
8
- """
9
 
10
  css = '''
11
- .gradio-container{max-width: 680px !important}
12
  h1{text-align:center}
13
  footer {
14
  visibility: hidden
15
  }
16
  '''
17
-
18
- load_dotenv()
19
-
20
- GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
21
-
22
- genai.configure(api_key=GEMINI_API_KEY)
23
-
24
- generation_config = {
25
- "temperature": 0.7,
26
- "top_p": 0.95,
27
- "top_k": 64,
28
- "max_output_tokens": 512,
29
- "response_mime_type": "text/plain",
30
- }
31
 
32
  mood_prompts = {
33
  "Fun": "Respond in a light-hearted, playful manner.",
@@ -80,40 +61,60 @@ mood_prompts = {
80
  "Worried": "Respond with concern and apprehension."
81
  }
82
 
83
def generate_response(user_input, chat_history, mood):
    """Send the rolling chat transcript to Gemini under a mood-specific persona.

    Args:
        user_input: The newest user message.
        chat_history: Mutable list of prior messages; the new message is
            appended, then the transcript is trimmed to the last 10 entries.
        mood: Key into ``mood_prompts`` selecting the system instruction.

    Returns:
        A ``(reply_text, trimmed_history)`` tuple; on failure the reply
        text carries the error message instead of raising.
    """
    # Fresh model per call so the mood-driven system instruction applies.
    model = genai.GenerativeModel(
        model_name="gemini-1.5-pro",
        generation_config=generation_config,
        system_instruction=mood_prompts[mood],
    )

    chat_history.append(user_input)
    # Keep only the most recent 10 turns to bound the prompt size.
    chat_history = chat_history[-10:]

    try:
        session = model.start_chat()
        reply = session.send_message("\n".join(chat_history))
    except Exception as e:
        return f"Error: {str(e)}", chat_history
    return reply.text, chat_history
103
-
104
# Assemble the single-page UI: one text box in, one text box out,
# with mood selection feeding the system instruction.
with gr.Blocks(css=css, theme="allenai/gradio-theme") as iface:
    gr.Markdown(DESCRIPTIONx)
    chat_input = gr.Textbox(lines=4, label="Chatbot", placeholder="Enter your message here...")
    # Server-side conversation memory, threaded through each click.
    chat_history_state = gr.State([])
    response_output = gr.Textbox(label="Response", lines=4)

    generate_button = gr.Button("Submit")
    mood_selector = gr.Radio(choices=list(mood_prompts.keys()), value="Professional", label="Select Mood")

    # Submit wires (message, history, mood) -> (reply, updated history).
    generate_button.click(
        fn=generate_response,
        inputs=[chat_input, chat_history_state, mood_selector],
        outputs=[response_output, chat_history_state]
    )

iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from huggingface_hub import InferenceClient
2
  import gradio as gr
 
 
 
 
 
 
3
 
4
  css = '''
5
+ .gradio-container{max-width: 1000px !important}
6
  h1{text-align:center}
7
  footer {
8
  visibility: hidden
9
  }
10
  '''
11
# Hosted Hugging Face inference endpoint; every generation call below is
# proxied through this client.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  mood_prompts = {
14
  "Fun": "Respond in a light-hearted, playful manner.",
 
61
  "Worried": "Respond with concern and apprehension."
62
  }
63
 
64
def format_prompt(message, history, system_prompt=None, mood=None):
    """Assemble a Mistral-instruct-style prompt string.

    Order: opening ``<s>``, optional mood directive, all prior
    (user, bot) turns, optional system prompt, then the new message.

    Args:
        message: The newest user message.
        history: Iterable of ``(user_prompt, bot_response)`` pairs.
        system_prompt: Optional extra system instruction.
        mood: Optional key into ``mood_prompts``; unknown keys degrade
            to an empty directive.

    Returns:
        The fully concatenated prompt string.
    """
    parts = ["<s>"]
    if mood:
        parts.append(f"[SYS] {mood_prompts.get(mood, '')} [/SYS] ")
    for past_user, past_bot in history:
        parts.append(f"[INST] {past_user} [/INST]")
        parts.append(f" {past_bot}</s> ")
    if system_prompt:
        parts.append(f"[SYS] {system_prompt} [/SYS]")
    parts.append(f"[INST] {message} [/INST]")
    return "".join(parts)
76
+
77
def generate(
    prompt, history, system_prompt=None, mood=None, temperature=0.2, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,
):
    """Stream a completion from the hosted model.

    Yields the cumulative generated text after every received token, so a
    UI bound to this generator renders the reply incrementally.

    Args:
        prompt: The newest user message.
        history: Prior (user, bot) turns, forwarded to ``format_prompt``.
        system_prompt: Optional system instruction.
        mood: Optional mood key, forwarded to ``format_prompt``.
        temperature: Sampling temperature; clamped to a minimum of 1e-2
            so sampling stays valid.
        max_new_tokens: Generation length cap.
        top_p: Nucleus-sampling threshold.
        repetition_penalty: Penalty applied to repeated tokens.
    """
    sampling_kwargs = dict(
        temperature=max(float(temperature), 1e-2),
        max_new_tokens=max_new_tokens,
        top_p=float(top_p),
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed: identical inputs reproduce identical streams
    )

    full_prompt = format_prompt(prompt, history, system_prompt, mood)
    stream = client.text_generation(
        full_prompt, **sampling_kwargs, stream=True, details=True, return_full_text=False
    )

    pieces = []
    for event in stream:
        pieces.append(event.token.text)
        yield "".join(pieces)
    return "".join(pieces)
102
+
103
def gradio_interface():
    """Build and launch the mood-aware chat UI.

    Fixes two wiring bugs in the original:

    * ``generate`` yields a plain cumulative string, but its output was
      bound directly to ``gr.Chatbot``, which expects a list of
      (user, assistant) pairs — an internal adapter now converts each
      partial completion into that shape.
    * the ``history`` State was passed as an input but never written
      back, so earlier turns were silently dropped from every prompt;
      the adapter now persists each finished turn into the State.
    """

    def _stream_chat(message, past_turns, sys_prompt, mood_choice):
        # Adapt the string-yielding `generate` generator to Chatbot pairs
        # and write the completed turn back into the history State.
        past_turns = list(past_turns or [])
        partial = ""
        for partial in generate(message, past_turns, sys_prompt, mood_choice):
            # While streaming, show the in-progress turn but leave the
            # stored history untouched until the reply is complete.
            yield past_turns + [(message, partial)], past_turns
        # Final yield commits the finished turn to the State.
        yield past_turns + [(message, partial)], past_turns + [(message, partial)]

    with gr.Blocks(css=css, theme="allenai/gradio-theme") as demo:
        with gr.Row():
            mood = gr.Dropdown(choices=list(mood_prompts.keys()), label="Response Mood", value="Friendly")
        history = gr.State([])
        system_prompt = gr.Textbox(placeholder="System prompt (optional)", lines=1)
        prompt = gr.Textbox(placeholder="Enter your message", lines=2)
        generate_btn = gr.Button("Generate")
        output = gr.Chatbot()

        generate_btn.click(
            _stream_chat,
            inputs=[prompt, history, system_prompt, mood],
            outputs=[output, history],
        )
    demo.queue().launch(show_api=False)

gradio_interface()