# J.A.R.V.I.S / chatbot.py
# (HuggingFace upload metadata, converted to comments so the file parses:
#  varun324242's picture — Upload folder using huggingface_hub — fe2a0f2 verified)
import gradio as gr
import requests
import time
import logging
import json
from typing import Dict, Any
from datetime import datetime
import asyncio
import aiohttp
# Configure root logging: mirror every record to stderr and to a
# timestamped per-run log file (one file per process start).
_log_filename = f'chat_logs_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s | %(levelname)s | %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler(_log_filename),
    ],
)
class Chatbot:
    """Streaming chat client for a Flowise-style prediction endpoint.

    Accumulates a Gradio-style history of ``(user_message, bot_answer)``
    tuples and streams partial answers over the endpoint's SSE response.
    The endpoint URL and bearer token default to the original hard-coded
    values but can now be supplied by the caller.
    """

    def __init__(
        self,
        api_url: str = "http://localhost:3000/api/v1/prediction/6e2ce64b-b45b-4794-97b6-a85480882d72",
        api_key: str = "kpIJOgDQH9XWy3YmvcfTj18A_2PGJi6uXhLCCGogkFM",
    ):
        # SECURITY: the default bearer token is committed in source; move it
        # to an environment variable or secrets store before deploying.
        self.api_url = api_url
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
            "Accept": "text/event-stream",
        }
        self.history = []        # list of (user_message, bot_answer) tuples
        self.request_count = 0   # NOTE(review): never incremented in visible code

    async def stream_response(self, message: str):
        """Yield progressively longer partial answers for *message*.

        Each yielded value is the full response accumulated so far, so a
        caller can simply replace its displayed text on every iteration.
        On any transport/HTTP error a single human-readable error string
        is yielded instead.
        """
        async with aiohttp.ClientSession() as session:
            try:
                payload = {
                    "question": message,
                    "overrideConfig": {
                        "supervisorName": "AI Assistant",
                        "supervisorPrompt": "You are a helpful AI assistant",
                        "summarization": True,
                        "recursionLimit": 1
                    }
                }
                async with session.post(
                    self.api_url,
                    headers=self.headers,
                    json=payload
                ) as response:
                    response.raise_for_status()
                    full_response = ""
                    # NOTE(review): iterating response.content yields raw byte
                    # chunks, which may split an SSE line across boundaries;
                    # this assumes the server emits one complete
                    # "data: ..." line per chunk — confirm against the API.
                    async for line in response.content:
                        if not line:
                            continue
                        try:
                            decoded_line = line.decode('utf-8').strip()
                            if decoded_line.startswith('data: '):
                                data = json.loads(decoded_line[6:])
                                full_response += data.get('token', '')
                                yield full_response
                        except json.JSONDecodeError:
                            # Skip keep-alive / non-JSON SSE lines.
                            continue
            except Exception as e:
                # Top-level boundary: surface the failure to the UI as text.
                error_msg = f"Streaming Error: {str(e)}"
                logging.error(error_msg)
                yield error_msg

    async def chat(self, message: str, history: list):
        """Gradio handler: append *message* to history and stream the answer.

        Yields the updated history after every partial response so the
        Chatbot component re-renders incrementally.
        """
        logging.info(f"""
================== CHAT MESSAGE ==================
User Message: {message}
History Length: {len(history)}
Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')}
================================================
""")
        # Start a new exchange with the answer slot empty.
        self.history = history + [(message, None)]
        # Replace the answer slot with each partial response as it arrives.
        async for partial_response in self.stream_response(message):
            self.history[-1] = (message, partial_response)
            yield self.history
        logging.info(f"""
================== BOT RESPONSE ==================
Bot Answer: {self.history[-1][1]}
Total Messages in History: {len(self.history)}
================================================
""")
def create_ui() -> gr.Interface:
    """Build and return the Gradio Blocks app wrapping a Chatbot instance.

    Wires a textbox and Send button to the bot's streaming ``chat``
    handler, plus a Clear button and example prompts.
    """
    logging.info("Initializing chat interface...")

    # One bot instance shared by all event handlers of this UI.
    agent = Chatbot()

    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# AI Agent Assistant")
        gr.Markdown("Ask me anything! I'll stream the response as I think.")

        history_display = gr.Chatbot()
        user_input = gr.Textbox(
            placeholder="Type your message here...",
            container=False,
            scale=7,
        )

        with gr.Row():
            send_btn = gr.Button("Send", variant="primary", scale=1)
            clear_btn = gr.Button("Clear", variant="secondary", scale=1)

        # Canned questions users can click to populate the textbox.
        gr.Examples(
            examples=[
                "What is machine learning?",
                "How do neural networks work?",
                "Explain Python decorators",
            ],
            inputs=user_input,
        )

        # Enter key and Send button both stream through the same handler.
        user_input.submit(
            fn=agent.chat,
            inputs=[user_input, history_display],
            outputs=history_display,
            api_name="chat",
        )
        send_btn.click(
            fn=agent.chat,
            inputs=[user_input, history_display],
            outputs=history_display,
            api_name="chat",
        )

        # Clear wipes both the conversation pane and the input box.
        clear_btn.click(lambda: None, None, history_display, queue=False)
        clear_btn.click(lambda: "", None, user_input, queue=False)

    logging.info("Chat interface created successfully")
    return demo
def main():
    """Entry point: log startup, build the UI, and launch the web server."""
    started_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    banner = """
=================================================================
Starting Agent Chatbot Application
Time: {time}
=================================================================
""".format(time=started_at)
    logging.info(banner)

    ui = create_ui()

    logging.info("Launching web interface...")
    # share=True keeps the app reachable from Colab / remote notebooks.
    ui.launch(share=True, debug=True, server_port=7860)


if __name__ == "__main__":
    main()