Aditya0619 committed on
Commit
9910dfa
1 Parent(s): c4054ea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -34
app.py CHANGED
@@ -4,74 +4,75 @@ from transformers import pipeline
4
# Text-generation pipeline that powers every chat turn; loaded once at import.
MEDBOT_MODEL = "Aditya0619/Medbot"
chatbot_pipeline = pipeline("text-generation", model=MEDBOT_MODEL)
6
 
7
# Generate the next bot reply and record it in the conversation history.
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Answer *message* in the context of *history* using the Medbot pipeline.

    Args:
        message: The user's latest message.
        history: List of (user, bot) exchange tuples, or None for a new chat.
        system_message: Optional instruction placed at the top of the prompt
            (previously accepted but silently ignored — now honored).
        max_tokens: Maximum number of newly generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        (history, history): the updated history duplicated, because the
        Gradio callback feeds both the Chatbot display and the State.
    """
    if history is None:
        history = []

    # Assemble the prompt: optional system instruction, then the transcript.
    lines = [f"{system_message}\n"] if system_message else []
    for past_user, past_bot in history:
        lines.append(f"User: {past_user}\nBot: {past_bot}\n")
    lines.append(f"User: {message}\nBot:")
    chat_input = "".join(lines)

    # max_new_tokens instead of max_length: max_length counts the prompt, so a
    # long history would leave no budget for the reply. do_sample=True is
    # required for temperature/top_p to take effect on GPT-2-style models.
    full_text = chatbot_pipeline(
        chat_input,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        pad_token_id=50256,  # GPT-2 family has no pad token; reuse the EOS id
    )[0]["generated_text"]

    # Everything after the last "Bot:" marker is the new reply.
    response = full_text.split("Bot:")[-1].strip()

    history.append((message, response))
    return history, history
33
 
34
# Assemble the Gradio interface: settings accordion, chat window, send button.
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 AI Chatbot with Memory\nChat with me! I’ll remember your messages.")

    # Generation settings, collapsed by default.
    with gr.Row():
        with gr.Accordion("⚙️ Configure Chatbot Settings", open=False):
            system_message = gr.Textbox(label="System Message (Optional)", placeholder="e.g., You are a helpful assistant.")
            max_tokens = gr.Slider(label="Max Tokens", minimum=50, maximum=500, value=250, step=10)
            temperature = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, value=0.7, step=0.1)
            top_p = gr.Slider(label="Top P", minimum=0.0, maximum=1.0, value=0.9, step=0.1)

    # Conversation display and message entry.
    chatbot = gr.Chatbot(label="Chat with AI")
    user_input = gr.Textbox(label="Your Message", placeholder="Type a message...", lines=2)

    # Hidden per-session conversation history.
    state = gr.State([])

    submit = gr.Button("Send")

    # Wire the Send button to the response function.
    submit.click(
        respond,
        inputs=[user_input, state, system_message, max_tokens, temperature, top_p],
        outputs=[chatbot, state],
    )

    # Seed the chat with a greeting. NOTE(review): the greeting fills the
    # *user* half of the (user, bot) pair — confirm ("", greeting) wasn't meant.
    demo.load(lambda: [("Hi! How can I assist you today?", "")], outputs=chatbot)

# Start the app.
demo.launch()
 
 
 
 
 
 
 
 
77
 
 
4
# Load the Medbot conversational model once at start-up.
chatbot_pipeline = pipeline(task="text-generation", model="Aditya0619/Medbot")
 
7
# Chatbot response function: builds a prompt from the history, generates a
# reply, and returns the updated history for both display and state.
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Produce the next bot reply for *message* given prior *history*.

    Args:
        message: The user's new message.
        history: Prior (user, bot) exchanges; None is treated as empty.
        system_message: Optional instruction prepended to the prompt. The
            original code accepted this but never used it — fixed here.
        max_tokens: Maximum number of NEW tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The updated history twice (Chatbot display and State outputs).
    """
    if history is None:
        history = []

    # Build conversation context, including the optional system prompt.
    chat_input = f"{system_message}\n" if system_message else ""
    for user_input, bot_response in history:
        chat_input += f"User: {user_input}\nBot: {bot_response}\n"
    chat_input += f"User: {message}\nBot:"

    # Generate the reply. max_new_tokens (not max_length) keeps the budget
    # for the reply independent of the prompt length; do_sample=True makes
    # temperature/top_p actually take effect on GPT-2-style models.
    generated = chatbot_pipeline(
        chat_input,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        pad_token_id=50256,  # Avoid padding warnings with GPT-2 models
    )[0]["generated_text"]

    # Keep only the text after the final "Bot:" marker.
    response = generated.split("Bot:")[-1].strip()

    # Update history
    history.append((message, response))
    return history, history
30
 
31
# Programmatic entry point: wraps respond() with default generation settings
# and packages the result as a JSON-friendly dict.
def api_chat(message, history=None):
    """Return the bot's reply to *message* plus the updated history."""
    history = [] if history is None else history
    updated_history, _ = respond(
        message, history, "", max_tokens=250, temperature=0.7, top_p=0.9
    )
    latest_reply = updated_history[-1][1]
    return {"response": latest_reply, "history": updated_history}
39
+
40
# Gradio UI layout
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 AI Chatbot with API Access\nChat with AI or use the API!")

    # Configurable generation parameters in a collapsible accordion.
    with gr.Row():
        with gr.Accordion("⚙️ Configure Chatbot Settings", open=False):
            system_message = gr.Textbox(label="System Message (Optional)", placeholder="e.g., You are a helpful assistant.")
            max_tokens = gr.Slider(label="Max Tokens", minimum=50, maximum=500, value=250, step=10)
            temperature = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, value=0.7, step=0.1)
            top_p = gr.Slider(label="Top P", minimum=0.0, maximum=1.0, value=0.9, step=0.1)

    # Chatbot interface and user input field
    chatbot = gr.Chatbot(label="Chat with AI")
    user_input = gr.Textbox(label="Your Message", placeholder="Type a message...", lines=2)
    state = gr.State([])  # Store conversation history

    submit = gr.Button("Send")

    # Link input to chatbot response
    submit.click(
        respond,
        inputs=[user_input, state, system_message, max_tokens, temperature, top_p],
        outputs=[chatbot, state],
    )

    # Initial greeting. NOTE(review): the greeting occupies the *user* slot of
    # the (user, bot) pair — confirm whether ("", "Hi! ...") was intended.
    demo.load(lambda: [("Hi! How can I assist you today?", "")], outputs=chatbot)

# Launch the UI WITHOUT blocking the main thread: launch() blocks in a plain
# script by default, which meant the API server below never started. Also,
# launch() returns an (app, local_url, share_url) tuple, not a URL string, so
# the old f-string printed the raw tuple.
print("Launching the Gradio app...")
_, ui_local_url, ui_share_url = demo.launch(
    share=True, server_name="0.0.0.0", server_port=7860, prevent_thread_lock=True
)
print(f"App hosted at: {ui_share_url or ui_local_url}")

# Separate JSON API endpoint; this launch blocks and keeps the process alive.
api = gr.Interface(fn=api_chat, inputs=[gr.Textbox(), gr.State([])], outputs="json")
_, api_local_url, api_share_url = api.launch(
    share=True, server_name="0.0.0.0", server_port=7861
)
print(f"API hosted at: {api_share_url or api_local_url}")
78