Update app.py
app.py CHANGED
@@ -1,84 +1,58 @@
-import gradio as gr
-import
-from huggingface_hub import snapshot_download
-from llama_cpp import Llama
-import sys
-from io import StringIO
-from transformers import pipeline
-from datasets import load_dataset
-import torch
-import json
-from pathlib import Path
-
-repo_name = "TheBloke/Mistral-7B-v0.1-GGUF"
-model_file = "mistral-7b-v0.1.Q4_K_M.gguf"
-
-print('Fetching model:', repo_name, model_file)
-snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_file)
-print('Done fetching model.')
-
-DEFAULT_MODEL_PATH = model_file
-
-llm = Llama(model_path=model_file, n_gpu_layers=100, n_ctx=2048)
-
-# Global flag for first run
-isFirstRun = True
-context = ""
-history = []  # Initialize history
-initContext = """<|im_start|>You are playing the role of an aggressive patient called Barry Lawson. Barry has been waiting for hours at the hospital with severe pain in his chest and hasn't been seen yet. You are incredibly frustrated and angry that things are taking so long, you will only calm down if your concerns are addressed. Waiting will only make you more angry and aggressive towards the staff. At any moment you could break into a RAGE! Make sure you don't censor swear words. Make sure to explain your emotional state and thinking before responding, for example, Barry: (tired of waiting and very angry) What the fuck do I have to do around here to get some treatment!"""
-feedback_file = Path("/content/datalog.json")
-
-def AIPatient(message):
-
-    global isFirstRun, history, context
-
-    if isFirstRun:
-        context = initContext
-        isFirstRun = False
-    #else:
-    #    for turn in history:
-    #        context += f"\n<|im_start|> Nurse: {turn[0]}\n<|im_start|> Barry: {turn[1]}"
-    context += """
-<|im_start|>nurse
-Nurse: """ + message + """
-<|im_start|>barry
-Barry:
-"""
-
-    response = ""
-    # Query the model until it returns a non-empty completion
-    while len(response) < 1:
-        output = llm(context, max_tokens=400, stop=["Nurse:"], echo=False)
-        response = output["choices"][0]["text"]
-        response = response.strip()
-
-    #with feedback_file.open("a") as f:
-    #    f.write(json.dumps({"Nurse": message, "Barry": response}, indent=4))
-    #    f.write("\n")
-
-    context += response
-    print(context)
-
-    history.append((message, response))
-    return history
-
-with gr.Blocks() as demo:
-    gr.Markdown("# AI Patient Chatbot")
-    with gr.Group():
-        with gr.Tab("Patient Chatbot"):
-            chatbot = gr.Chatbot()
-            message = gr.Textbox(label="Enter your message to Barry", placeholder="Type here...", lines=2)
-            send_message = gr.Button("Submit")
-            send_message.click(AIPatient, inputs=[message], outputs=[chatbot])
-            save_chatlog = gr.Button("Save Chatlog")
-            #send_message.click(SaveChatlog, inputs=[message], outputs=[chatbot])
-
-#message.submit(AIPatient, inputs=[message], outputs=[chatbot])
-
-demo.launch(debug=True)
+import streamlit as st
+from langchain.prompts import PromptTemplate
+from langchain.llms import CTransformers
+
+## Function to get a response from the GGUF LLM
+
+def getLLamaresponse(input_text, no_words, blog_style):
+
+    ### Load the model (an OpenHermes-2.5 Mistral GGUF) via CTransformers
+    llm = CTransformers(model='TheBloke/OpenHermes-2.5-Mistral-7B-GGUF',
+                        model_type='llama',
+                        config={'max_new_tokens': 256,
+                                'temperature': 0.01})
+
+    ## Prompt template
+    template = """
+        Write a blog for a {blog_style} job profile on the topic {input_text},
+        within {no_words} words.
+        """
+
+    prompt = PromptTemplate(input_variables=['blog_style', 'input_text', 'no_words'],
+                            template=template)
+
+    ## Generate the response from the model
+    response = llm(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
+    print(response)
+    return response
+
+
+st.set_page_config(page_title="Generate Blogs",
+                   page_icon='🤖',
+                   layout='centered',
+                   initial_sidebar_state='collapsed')
+
+st.header("Generate Blogs 🤖")
+
+input_text = st.text_input("Enter the Blog Topic")
+
+## Create two more columns for the two additional input fields
+col1, col2 = st.columns([5, 5])
+
+with col1:
+    no_words = st.text_input('No of Words')
+with col2:
+    blog_style = st.selectbox('Writing the blog for',
+                              ('Researchers', 'Data Scientist', 'Common People'), index=0)
+
+submit = st.button("Generate")
+
+## Final response
+if submit:
+    st.write(getLLamaresponse(input_text, no_words, blog_style))
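
For a quick sanity check of the new generation pipeline outside Streamlit, a minimal sketch follows (not part of the commit). It assumes langchain and ctransformers are installed; the model_file value is an assumed quantization filename from the TheBloke/OpenHermes-2.5-Mistral-7B-GGUF repo, not confirmed by this change. The app itself would be launched with `streamlit run app.py`.

# Minimal standalone sketch of the PromptTemplate + CTransformers flow used above.
# model_file is an assumed GGUF filename, not confirmed by this commit.
from langchain.prompts import PromptTemplate
from langchain.llms import CTransformers

llm = CTransformers(model='TheBloke/OpenHermes-2.5-Mistral-7B-GGUF',
                    model_file='openhermes-2.5-mistral-7b.Q4_K_M.gguf',  # assumed
                    model_type='llama',
                    config={'max_new_tokens': 256, 'temperature': 0.01})

prompt = PromptTemplate(input_variables=['blog_style', 'input_text', 'no_words'],
                        template='Write a blog for a {blog_style} job profile on the topic {input_text}, within {no_words} words.')

# Same call pattern as getLLamaresponse(), with fixed sample inputs.
print(llm(prompt.format(blog_style='Researchers',
                        input_text='quantized LLM inference',
                        no_words='100')))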