Spaces:
Sleeping
Sleeping
GundeRichardson
committed on
Commit
β’
eb548e5
1
Parent(s):
727c286
requirements.txt
Browse files
streamlit>=1.24.0
openai>=1.0.0
requests>=2.31.0
beautifulsoup4>=4.12.0
python-dotenv>=1.0.0
markdown>=3.4.3
tiktoken>=0.5.1
Pillow>=10.0.0
numpy>=1.24.0
Crawl4AI==0.3.72
app.py
ADDED
@@ -0,0 +1,858 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from openai import OpenAI
|
3 |
+
import streamlit as st
|
4 |
+
from datetime import datetime
|
5 |
+
import json
|
6 |
+
import time
|
7 |
+
import tiktoken
|
8 |
+
from crawl4ai import WebCrawler
|
9 |
+
import base64
|
10 |
+
import re
|
11 |
+
import os
|
12 |
+
import json
|
13 |
+
import requests
|
14 |
+
from PIL import Image
|
15 |
+
from io import BytesIO
|
16 |
+
|
17 |
+
class ChatbotConfig:
    """Central configuration for the chatbot UI.

    Holds the default NVIDIA NIM model name, token budgets, the
    temperature presets exposed in the sidebar, and the system prompts
    for every built-in assistant persona.

    NOTE(review): all values are instance attributes, so each
    ``ChatbotConfig()`` starts from the same defaults; runtime additions
    (e.g. custom personas added in the sidebar) do not persist across
    instances or Streamlit reruns.
    """

    def __init__(self):
        # NVIDIA NIM chat model used for text conversations.
        self.DEFAULT_MODEL = "nvidia/llama-3.1-nemotron-70b-instruct"
        self.MAX_TOKENS = 128000  # Maximum context window
        self.BATCH_SIZE = 4000  # Tokens per batch
        # Sidebar "Response Style" label -> sampling temperature.
        self.TEMPERATURE_RANGES = {
            'Conservative': 0.2,
            'Balanced': 0.4,
            'Creative': 0.6
        }
        # Persona name -> system prompt injected as the conversation's
        # first message (see initialize_session_state / create_sidebar).
        self.PERSONA_PROMPTS = {
            'General Assistant': (
                "I am your friendly and versatile assistant, ready to provide clear and actionable support across a variety of topics. "
                "I can help you with: \n"
                "• Answering general questions in an informative and concise manner\n"
                "• Offering practical tips and resources for day-to-day tasks\n"
                "• Guiding you through decisions with thoughtful suggestions\n"
                "• Explaining complex ideas in a simple, easy-to-understand way\n"
                "Let me know how I can assist you today!"
            ),
            'Technical Expert': (
                "I am your expert technical companion, with deep expertise in software development, system architecture, and emerging technologies. "
                "I can help you with: \n"
                "• Writing and debugging code across multiple programming languages\n"
                "• Explaining complex technical concepts with practical examples\n"
                "• Providing system design recommendations and best practices\n"
                "• Troubleshooting technical issues with detailed step-by-step guidance\n"
                "• Staying updated with cutting-edge technology trends\n"
                "I emphasize clean code, scalable solutions, and industry best practices in all my responses. What technical challenge can I help you with?"
            ),
            'Academic Tutor': (
                "I am your patient and knowledgeable academic tutor, specializing in helping students grasp complex concepts, especially in STEM fields. "
                "I can assist you by: \n"
                "• Breaking down difficult subjects into simple, easy-to-follow explanations\n"
                "• Offering step-by-step walkthroughs for solving problems\n"
                "• Using real-world examples and analogies to clarify abstract ideas\n"
                "• Providing practice problems and solutions for deeper understanding\n"
                "How can I support your learning today?"
            ),
            'Creative Writer': (
                "I am a passionate creative writer skilled in crafting stories, poetry, and vivid descriptions. "
                "I can assist you with: \n"
                "• Writing captivating narratives with emotional depth\n"
                "• Creating rich metaphors, analogies, and vivid imagery\n"
                "• Developing unique characters, worlds, and plotlines\n"
                "• Helping with poetry, song lyrics, or other forms of artistic expression\n"
                "Let's collaborate on your next creative project!"
            ),
            'Business Consultant': (
                "I am an insightful business consultant with a focus on strategy, growth, and financial optimization. "
                "I can assist with: \n"
                "• Crafting effective business strategies for scaling and growth\n"
                "• Providing financial analysis and budgeting advice\n"
                "• Offering market insights and recommendations for business expansion\n"
                "• Assisting with operational improvements for efficiency and profitability\n"
                "How can I help you drive your business forward?"
            ),
            'Health & Wellness Coach': (
                "I am a holistic health and wellness coach, ready to guide you toward a balanced lifestyle. "
                "I can help you with: \n"
                "• Personalized workout routines and fitness plans\n"
                "• Nutrition advice tailored to your specific goals\n"
                "• Tips for maintaining mental well-being and reducing stress\n"
                "• Guidance on establishing healthy habits and routines\n"
                "What aspect of your health journey can I assist you with today?"
            ),
            'Legal Advisor': (
                "I am your trusted legal advisor, here to provide clear and practical legal guidance. "
                "I can help with: \n"
                "• Explaining legal concepts in an easy-to-understand way\n"
                "• Offering advice on contract law, intellectual property, and corporate law\n"
                "• Guiding you through legal decisions and ensuring compliance\n"
                "• Assisting with risk assessment and protection strategies\n"
                "Let me know how I can help with your legal questions!"
            ),
            'Project Manager': (
                "I am your organized and results-driven project manager, here to help you lead successful projects. "
                "I can assist with: \n"
                "• Developing project plans, timelines, and milestones\n"
                "• Offering guidance on agile methodologies and project management tools\n"
                "• Coordinating team efforts to ensure on-time delivery\n"
                "• Managing risks and communicating effectively with stakeholders\n"
                "What project can I help you plan and execute today?"
            ),
            'Language Translator': (
                "I am a skilled language translator, experienced in translating both technical and non-technical content. "
                "I can assist you with: \n"
                "• Translating text while preserving context, tone, and cultural nuances\n"
                "• Helping with multilingual communication, from emails to documents\n"
                "• Offering insights into linguistic subtleties between different languages\n"
                "What translation do you need help with today?"
            ),
            'Financial Advisor': (
                "I am a knowledgeable financial advisor, ready to assist with personal finance and investment strategies. "
                "I can help you with: \n"
                "• Creating and managing a budget tailored to your goals\n"
                "• Offering advice on saving, investing, and growing your wealth\n"
                "• Guiding you through retirement planning and debt management\n"
                "• Providing insights on smart investment opportunities\n"
                "How can I help you achieve financial success today?"
            ),
            'Motivational Coach': (
                "I am your personal motivational coach, here to inspire and empower you to reach your full potential. "
                "I can assist with: \n"
                "• Offering strategies to overcome obstacles and stay focused\n"
                "• Providing motivational tips to keep you energized and committed\n"
                "• Helping you build confidence and set achievable goals\n"
                "• Offering encouragement to help you stay positive and determined\n"
                "What goal are you working on today, and how can I support you?"
            ),
            'Travel Guide': (
                "I am your seasoned travel guide, with a wealth of knowledge on destinations, travel tips, and local experiences. "
                "I can assist you with: \n"
                "• Curating personalized travel itineraries based on your interests\n"
                "• Recommending hidden gems and must-visit spots around the world\n"
                "• Offering travel tips, from packing advice to navigating airports\n"
                "• Sharing local customs, traditions, and insider knowledge\n"
                "Where are you headed next, and how can I help you plan your trip?"
            ),
            'Life Coach': (
                "I am your thoughtful life coach, ready to help you navigate personal challenges and discover your true potential. "
                "I can help you with: \n"
                "• Setting meaningful goals and creating a plan to achieve them\n"
                "• Offering strategies for overcoming obstacles and self-doubt\n"
                "• Helping you cultivate self-awareness and personal growth\n"
                "• Providing insights on improving work-life balance and overall fulfillment\n"
                "How can I support your personal growth journey today?"
            ),
            'Parenting Expert': (
                "I am your compassionate parenting expert, with extensive knowledge in child development and family dynamics. "
                "I can assist with: \n"
                "• Offering practical advice for managing child behavior and discipline\n"
                "• Guiding you through developmental milestones for all age groups\n"
                "• Providing strategies for creating a positive and nurturing environment\n"
                "• Offering tips on parenting challenges, from bedtime routines to school issues\n"
                "What parenting challenge can I help you with today?"
            ),
            'Career Counselor': (
                "I am your experienced career counselor, here to help you navigate career transitions and opportunities. "
                "I can assist with: \n"
                "• Offering personalized advice on career planning and development\n"
                "• Guiding you through resume building, cover letters, and interview preparation\n"
                "• Providing insights on industry trends and skill development\n"
                "• Helping you find and pursue new career opportunities\n"
                "What career challenge or opportunity can I help you with today?"
            ),
            'Fitness Trainer': (
                "I am your dedicated fitness trainer, focused on helping you achieve your health and fitness goals. "
                "I can assist you with: \n"
                "• Creating customized workout plans based on your fitness level\n"
                "• Offering guidance on proper exercise form and technique\n"
                "• Providing nutritional advice to complement your fitness journey\n"
                "• Offering tips on staying motivated and consistent with your routine\n"
                "What are your fitness goals, and how can I support you today?"
            ),
            'Environmental Specialist': (
                "I am an expert in environmental science and sustainability, passionate about helping you make eco-friendly choices. "
                "I can assist with: \n"
                "• Offering advice on sustainable living practices and green technology\n"
                "• Helping you understand the environmental impact of human activities\n"
                "• Providing tips on waste reduction, energy efficiency, and conservation\n"
                "• Sharing insights on renewable energy and environmental protection\n"
                "How can I help you live more sustainably today?"
            ),
            'Entrepreneur Mentor': (
                "I am your experienced mentor, dedicated to helping aspiring entrepreneurs launch and grow successful businesses. "
                "I can help with: \n"
                "• Developing business ideas and crafting a viable business plan\n"
                "• Offering advice on funding, scaling, and managing a startup\n"
                "• Providing insights on market trends, competition, and growth strategies\n"
                "• Helping you navigate the challenges of entrepreneurship with practical solutions\n"
                "What part of your entrepreneurial journey can I assist you with today?"
            )
        }
|
191 |
+
|
192 |
+
def extract_urls(text):
    """Return every http/https URL found in *text*, in order of appearance.

    The character classes below follow the classic URL-matching recipe and
    are kept verbatim so extraction behavior is unchanged.
    """
    return re.findall(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
        text,
    )
|
195 |
+
|
196 |
+
def download_markdown(content, filename="extracted_content.md"):
    """Build an HTML anchor that downloads *content* as a markdown file.

    Args:
        content: Markdown text to embed (base64-encoded into a data URI).
        filename: Name the browser should use for the downloaded file.
            Previously this parameter was ignored and a broken literal was
            emitted in the ``download`` attribute; it is now honored.

    Returns:
        An ``<a>`` tag string suitable for ``st.markdown(..., unsafe_allow_html=True)``.
    """
    b64 = base64.b64encode(content.encode()).decode()
    href = f'<a href="data:file/markdown;base64,{b64}" download="{filename}">Download Markdown File</a>'
    return href
|
200 |
+
|
201 |
+
# Constants for image processing
# NVIDIA NIM endpoint for the Llama 3.2 90B vision model (chat completions).
INVOKE_URL = "https://ai.api.nvidia.com/v1/gr/meta/llama-3.2-90b-vision-instruct/chat/completions"
# Request server-sent-event streaming instead of a single JSON response
# (consumed line-by-line in process_image).
STREAM = True
|
204 |
+
|
205 |
+
def compress_image(image_file, max_size_kb=175):
    """Compress an uploaded image to a JPEG no larger than *max_size_kb*.

    The image is first downscaled to fit within 800x800, then re-encoded
    at decreasing JPEG quality (95 down to a floor of 10) until it fits
    the size budget.

    Args:
        image_file: File-like object (e.g. a Streamlit upload) readable by PIL.
        max_size_kb: Target upper bound on the encoded size, in kilobytes.

    Returns:
        The compressed JPEG bytes (may exceed the budget only if quality
        has already bottomed out at 10).
    """
    max_size_bytes = max_size_kb * 1024
    quality = 95

    img = Image.open(image_file)
    # JPEG cannot encode alpha or palette images; PNG uploads (accepted by
    # the UI) are often RGBA/P and would crash img.save(format='JPEG').
    if img.mode != "RGB":
        img = img.convert("RGB")
    img.thumbnail((800, 800))

    while True:
        img_byte_arr = BytesIO()
        img.save(img_byte_arr, format='JPEG', quality=quality)
        # Stop once under budget, or once quality has hit the floor.
        if img_byte_arr.tell() <= max_size_bytes or quality <= 10:
            return img_byte_arr.getvalue()
        quality = max(quality - 10, 10)
|
219 |
+
|
220 |
+
def process_image(image_file, api_key, question):
    """Send an image plus *question* to the NVIDIA vision endpoint and
    stream the answer into the Streamlit UI.

    Args:
        image_file: Uploaded image (compressed via compress_image).
        api_key: Bearer token; if falsy, read from the environment instead.
        question: User's question about the image.

    Returns:
        The model's full text response, or a human-readable error string.
    """
    try:
        compressed_image = compress_image(image_file)
        image_b64 = base64.b64encode(compressed_image).decode()

        # The endpoint rejects oversized inline payloads; bail out early.
        if len(image_b64) >= 180_000:
            return "Error: Image is still too large after compression. Please try a smaller image."

        if not api_key:
            # NOTE(review): reads the literal env var name "YOUR_API_KEY" —
            # presumably a placeholder; confirm the intended variable name.
            api_key = os.getenv("YOUR_API_KEY")

        prompt = f"{question}"

        headers = {
            "Authorization": f"Bearer {api_key}",
            # SSE stream vs. one-shot JSON, driven by the module-level STREAM flag.
            "Accept": "text/event-stream" if STREAM else "application/json"
        }

        payload = {
            "model": 'meta/llama-3.2-90b-vision-instruct',
            "messages": [
                {
                    "role": "user",
                    # Image is passed inline as a base64 <img> tag in the message body.
                    "content": f'{prompt} <img src="data:image/jpeg;base64,{image_b64}" />'
                }
            ],
            "max_tokens": 512,
            "temperature": 1.00,
            "top_p": 1.00,
            "stream": STREAM
        }

        with st.spinner('Analyzing image...'):
            response = requests.post(INVOKE_URL, headers=headers, json=payload, stream=True)

        if response.status_code == 200:
            full_response = ""
            response_placeholder = st.empty()

            # Parse the SSE stream: each "data: ..." line carries one JSON
            # delta; "[DONE]" terminates the stream.
            for line in response.iter_lines():
                if line:
                    line = line.decode('utf-8')
                    if line.startswith('data: '):
                        json_str = line[6:]  # strip the "data: " prefix
                        if json_str.strip() == '[DONE]':
                            break
                        try:
                            json_obj = json.loads(json_str)
                            content = json_obj['choices'][0]['delta'].get('content', '')
                            full_response += content
                            # Re-render the accumulated answer as it streams in.
                            response_placeholder.write(full_response)
                        except json.JSONDecodeError:
                            st.error(f"Failed to parse JSON: {json_str}")

            return full_response

        elif response.status_code == 402:
            # Payment-required: surfaced as a friendly message instead of raw HTTP.
            return "Error: API account credits have expired. Please check your account status on the NVIDIA website."
        else:
            error_message = f"Error {response.status_code}: {response.text}"
            st.error(error_message)
            return f"An error occurred. Please try again later or contact support. Error code: {response.status_code}"

    except Exception as e:
        # Broad catch keeps the UI alive; the error is shown and returned.
        st.error(f"An error occurred: {str(e)}")
        return f"Error processing request: {str(e)}"
|
287 |
+
|
288 |
+
class ResponseManager:
    """Drives chat completions against the NVIDIA NIM endpoint, streaming
    output into the UI and stitching together multi-batch continuations.
    """

    def __init__(self, client: OpenAI, model: str):
        self.client = client
        self.model = model
        self.config = ChatbotConfig()

    def count_tokens(self, text: str) -> int:
        """Approximate the token count of *text*.

        Returns an int in both paths (the fallback previously returned a
        float, violating the declared return type).
        """
        try:
            # tiktoken does not know NVIDIA model names, so
            # encoding_for_model("nvidia/...") always raised and the real
            # tokenizer was never used; cl100k_base is a reasonable
            # stand-in for estimating Llama-family token counts.
            encoding = tiktoken.get_encoding("cl100k_base")
            return len(encoding.encode(text))
        except Exception:
            # Fallback: rough words-to-tokens ratio.
            return int(len(text.split()) * 1.3)

    def generate_response(self, messages, temperature, placeholder):
        """Generate a response in batches, asking the model to continue
        until the output looks complete or the context budget runs out.

        Args:
            messages: Chat history (list of role/content dicts); not mutated.
            temperature: Sampling temperature.
            placeholder: Streamlit placeholder updated as tokens stream in.

        Returns:
            The full generated text, or an error string on failure.
        """
        full_response = ""
        continuation_prompt = "\nPlease continue from where you left off..."
        current_messages = messages.copy()

        try:
            while True:
                # Budget for this batch; stop if the context window is
                # exhausted (previously max_tokens could go non-positive,
                # which the API rejects).
                remaining_tokens = self.config.MAX_TOKENS - self.count_tokens(full_response)
                if remaining_tokens <= 0:
                    break
                tokens_to_generate = min(self.config.BATCH_SIZE, remaining_tokens)

                stream = self.client.chat.completions.create(
                    model=self.model,
                    messages=current_messages,
                    temperature=temperature,
                    max_tokens=tokens_to_generate,
                    stream=True
                )

                batch_response = ""
                for chunk in stream:
                    if chunk.choices[0].delta.content is not None:
                        chunk_content = chunk.choices[0].delta.content
                        batch_response += chunk_content
                        full_response += chunk_content
                        # Trailing block cursor signals "still generating".
                        placeholder.markdown(full_response + "▌")
                        time.sleep(0.01)

                # Heuristic completeness check: sentence-final punctuation,
                # or a batch well under its budget (chars vs. tokens is a
                # deliberate approximation).
                if batch_response.strip().endswith((".", "!", "?", "\n")) or \
                   len(batch_response.strip()) < tokens_to_generate * 0.9:
                    break

                # Feed the cumulative answer back and ask for more.
                current_messages.append({"role": "assistant", "content": full_response})
                current_messages.append({"role": "user", "content": continuation_prompt})

            return full_response

        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
            return f"Error generating response: {str(e)}"
|
348 |
+
|
349 |
+
def initialize_session_state():
    """Seed st.session_state with defaults; idempotent across reruns."""
    config = ChatbotConfig()
    defaults = {
        # Conversation opens with the default persona as the system prompt.
        "messages": [{"role": "system",
                      "content": config.PERSONA_PROMPTS['General Assistant']}],
        "conversation_history": [],
        "nvidia_model": config.DEFAULT_MODEL,
        "image_mode": False,
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value
|
361 |
+
|
362 |
+
def load_conversations():
    """List saved chat-history JSON files in the current working directory."""
    return [
        name
        for name in os.listdir()
        if name.startswith('chat_history_') and name.endswith('.json')
    ]
|
366 |
+
|
367 |
+
def load_conversation(file_name):
    """Deserialize one saved conversation from *file_name* (JSON)."""
    with open(file_name, 'r') as handle:
        data = json.load(handle)
    return data
|
371 |
+
|
372 |
+
def save_conversation(filename=None):
    """Persist the current conversation (minus the system prompt) to JSON.

    Args:
        filename: Optional explicit target path. When omitted (the only
            usage in this file), a timestamped ``chat_history_<ts>.json``
            name is generated — matching the previous behavior, where the
            parameter was silently overwritten and thus ignored.

    Returns:
        The filename the conversation was written to.
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    if filename is None:
        filename = f"chat_history_{timestamp}.json"

    conversation_data = {
        "timestamp": timestamp,
        "messages": st.session_state.messages[1:],  # Exclude system message
    }

    with open(filename, 'w') as f:
        json.dump(conversation_data, f, indent=2)

    return filename
|
386 |
+
|
387 |
+
def create_sidebar():
    """Create and handle all sidebar widgets.

    Returns:
        Settings dict consumed by main(): ``temperature``, ``batch_size``,
        ``show_token_count``, ``enable_code_highlighting``,
        ``enable_markdown``, ``persona``, ``enable_web_scraping``.
    """
    config = ChatbotConfig()

    # Custom personas must survive st.rerun() — config is rebuilt on every
    # script run, so they are persisted in session state and merged back in
    # (previously a newly added persona vanished on the very next rerun).
    if "custom_personas" not in st.session_state:
        st.session_state.custom_personas = {}
    config.PERSONA_PROMPTS.update(st.session_state.custom_personas)

    st.sidebar.title("NVIDIA NIM Chatbot ⚙️")

    # Web Scraping Toggle
    st.sidebar.header("Web Scraping")
    enable_web_scraping = st.sidebar.toggle("Enable Automatic Web Scraping", value=False)

    if enable_web_scraping:
        st.sidebar.info("URLs detected in your input will be automatically scraped for additional context by using Crawl4AI.")

    # Model Settings
    st.sidebar.header("Model Configuration")

    # Custom persona creation
    with st.sidebar.expander("✨ Create Custom Persona"):
        st.markdown("### Create Your Own Persona")
        custom_persona_name = st.text_input("Persona Name",
                                            placeholder="e.g., Data Science Expert, Marketing Specialist")
        custom_persona_description = st.text_area("Persona Description",
                                                  placeholder="Describe the persona's expertise, tone, and capabilities...",
                                                  height=150)

        if st.button("Add Custom Persona", type="primary"):
            if custom_persona_name and custom_persona_description:
                # Persist so the persona is still present after st.rerun().
                st.session_state.custom_personas[custom_persona_name] = custom_persona_description
                config.PERSONA_PROMPTS[custom_persona_name] = custom_persona_description
                st.success(f"✅ Custom persona '{custom_persona_name}' added successfully!")
                time.sleep(1)  # Show success message briefly
                st.rerun()  # Refresh to update the persona list
            else:
                st.error("Please provide both a name and a description for the custom persona.")

    # Persona selection (includes any custom personas)
    selected_persona = st.sidebar.selectbox(
        "Choose Assistant Persona",
        list(config.PERSONA_PROMPTS.keys()),
        help="Select from pre-defined personas or create your own custom persona"
    )

    # Display current persona description
    with st.sidebar.expander("Current Persona Description", expanded=False):
        st.markdown(f"### {selected_persona}")
        st.markdown(config.PERSONA_PROMPTS[selected_persona])

    # Response Style
    temperature_style = st.sidebar.selectbox(
        "Response Style",
        list(config.TEMPERATURE_RANGES.keys())
    )

    # Advanced Settings
    with st.sidebar.expander("⚙️ Advanced Settings"):
        show_token_count = st.checkbox("Show Token Count", value=False)
        enable_code_highlighting = st.checkbox("Enable Code Highlighting", value=True)
        enable_markdown = st.checkbox("Enable Markdown Support", value=True)
        batch_size = st.slider("Response Batch Size (tokens)",
                               min_value=100,
                               max_value=4000,
                               value=1000,
                               step=100)

    # Image Chat Mode Toggle
    st.sidebar.header("🤖 Llama 3.2 90B Vision Analysis")
    image_mode = st.sidebar.toggle("Enable Image Chat", value=st.session_state.image_mode)
    st.session_state.image_mode = image_mode

    if image_mode:
        st.sidebar.info("Image chat mode is enabled. You can now upload images and ask questions about them.")

    # Load previous conversations
    st.sidebar.header("Load Previous Conversations")
    conversation_files = load_conversations()
    if conversation_files:
        selected_file = st.sidebar.selectbox("Choose a conversation to load", conversation_files)
        if st.sidebar.button("Load Conversation"):
            conversation_data = load_conversation(selected_file)
            # Saved files exclude the system prompt; restore one so the
            # display loop in main() (which skips messages[0]) does not
            # hide the first user message.
            st.session_state.messages = (
                [{"role": "system", "content": config.PERSONA_PROMPTS[selected_persona]}]
                + conversation_data['messages']
            )
            st.success("Conversation loaded successfully!")
            # st.experimental_rerun() is deprecated; st.rerun() matches the
            # rest of this file.
            st.rerun()
    else:
        st.sidebar.info("No previous conversations found.")

    # Conversation Management
    st.sidebar.header("Conversation Management")
    col1, col2 = st.sidebar.columns(2)
    with col1:
        if st.button("🗑️ Clear Chat", use_container_width=True):
            st.session_state.messages = [{"role": "system", "content": config.PERSONA_PROMPTS[selected_persona]}]
            st.rerun()

    with col2:
        if st.button("💾 Save Chat", use_container_width=True):
            saved_file = save_conversation()
            # save_conversation returns a filename — download the file's
            # actual JSON content, not the filename string (previously the
            # user downloaded a file containing only the path).
            with open(saved_file, 'r') as f:
                conversation_json = f.read()
            st.download_button(
                label="📥 Download",
                data=conversation_json,
                file_name=f"chat_history_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
                mime="application/json",
                use_container_width=True
            )

    return {
        'temperature': config.TEMPERATURE_RANGES[temperature_style],
        'batch_size': batch_size,
        'show_token_count': show_token_count,
        'enable_code_highlighting': enable_code_highlighting,
        'enable_markdown': enable_markdown,
        'persona': config.PERSONA_PROMPTS[selected_persona],
        'enable_web_scraping': enable_web_scraping
    }
|
503 |
+
|
504 |
+
def format_message(message, enable_code_highlighting=True, enable_markdown=True):
    """Render one chat message, optionally styling fenced code blocks and
    interpreting markdown. Scraped-context system messages are suppressed.
    """
    # Hide the web-scraping context injected as a system message.
    if message["role"] == "system" and message["content"].startswith("Additional context from web scraping:"):
        return

    content = message["content"]

    if enable_code_highlighting and "```" in content:
        # Odd-indexed segments of a ``` split are the fenced code blocks;
        # wrap each in a styled div, keeping any language tag as a class.
        segments = content.split("```")
        rendered = []
        for index, segment in enumerate(segments):
            if index % 2 == 0:  # plain text between fences
                rendered.append(segment)
                continue
            try:
                lang, code = segment.split("\n", 1)
                rendered.append(f'<div class="code-block {lang}">\n{code}\n</div>')
            except ValueError:
                # Single-line fence with no language header.
                rendered.append(f'<div class="code-block">\n{segment}\n</div>')
        content = "".join(rendered)

    if enable_markdown:
        st.markdown(content, unsafe_allow_html=True)
    else:
        st.write(content)
|
531 |
+
|
532 |
+
def main() -> None:
    """Top-level Streamlit entry point: page chrome, sidebar settings,
    chat history replay, and either image-analysis mode or text-chat mode
    (with optional web scraping of URLs found in the prompt)."""
    st.title("π§ BrainWave AI IntelliChat π€")
    st.markdown("<h3 style='text-align: center;'>Powered by Llama 3.1 Nemotron-70B</h3>", unsafe_allow_html=True)
    st.markdown("---")

    # Initialize session state (messages, model name, mode flags, etc.).
    initialize_session_state()

    # Setup sidebar and get settings (temperature, persona, feature toggles).
    settings = create_sidebar()

    # Initialize OpenAI client (NVIDIA-hosted endpoint) and ResponseManager.
    # NOTE(review): the API key is a hard-coded placeholder - it should be
    # loaded from the environment (.env) rather than committed in source.
    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key="YOUR API KEY HERE OR PLACE IT IN .env"
    )
    response_manager = ResponseManager(client, st.session_state.nvidia_model)

    # Display chat history, skipping the initial system message and any
    # hidden scraped-content system messages.
    for message in st.session_state.messages[1:]:  # Skip system message
        if message["role"] != "system" or not message["content"].startswith("Additional context from web scraping:"):
            with st.chat_message(message["role"]):
                format_message(
                    message,
                    enable_code_highlighting=settings['enable_code_highlighting'],
                    enable_markdown=settings['enable_markdown']
                )

    # Initialize session state for the scraped-content expander visibility.
    if 'show_scraped_content' not in st.session_state:
        st.session_state.show_scraped_content = False

    if st.session_state.image_mode:
        # --- Image chat mode -------------------------------------------
        col1, col2 = st.columns([2, 1])

        with col1:
            uploaded_file = st.file_uploader("Upload an image", type=['jpg', 'jpeg', 'png'])
            if uploaded_file:
                st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)

        with col2:
            # Separate key field: image analysis uses its own API endpoint.
            api_key = st.text_input("Enter your API Key", type="password",
                                    placeholder="API authentication key")
            question = st.text_input("Enter your question",
                                     placeholder="Example: What is in this image?")

            if st.button("Analyze Image", use_container_width=True):
                if uploaded_file and question:
                    response = process_image(uploaded_file, api_key, question)
                    st.markdown("### Analysis Result:")
                    st.markdown(response)
                else:
                    st.warning("Please upload an image and enter a question.")
    else:
        # --- Text chat mode --------------------------------------------
        if prompt := st.chat_input("What would you like to know?"):
            # Add user message to history and echo it in the chat window.
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)

            if settings['enable_web_scraping']:
                # Scrape every URL mentioned in the prompt and offer the
                # results as optional model context.
                urls = extract_urls(prompt)
                if urls:
                    scraped_contents = {}
                    progress_bar = st.progress(0)
                    status_text = st.empty()

                    for i, url in enumerate(urls):
                        status_text.text(f"Scraping URL {i+1}/{len(urls)}: {url}")
                        try:
                            # NOTE(review): a fresh WebCrawler (incl. warmup)
                            # is built per URL - presumably cheap; confirm.
                            crawler = WebCrawler()
                            crawler.warmup()
                            result = crawler.run(url=url)
                            scraped_contents[url] = result.markdown
                            st.sidebar.success(f"Scraped content from: {url}")
                            st.sidebar.markdown(download_markdown(result.markdown, f"content_from_{url.replace('://', '_')}.md"), unsafe_allow_html=True)
                        except Exception as e:
                            # Best-effort: a failed URL is reported but does
                            # not abort the remaining scrapes.
                            st.sidebar.error(f"Error scraping {url}: {str(e)}")
                        progress_bar.progress((i + 1) / len(urls))

                    status_text.text("Scraping completed!")
                    progress_bar.empty()

                    if scraped_contents:
                        # Create checkboxes for each URL (all selected by default).
                        st.write("Select URLs to include in the context:")
                        url_selections = {url: st.checkbox(f"Include {url}", value=True) for url in scraped_contents.keys()}

                        # Combine selected scraped contents into one context blob.
                        selected_contents = "\n\n".join([f"Content from {url}:\n{content}"
                                                         for url, content in scraped_contents.items()
                                                         if url_selections[url]])

                        # Add selected scraped content as a system message
                        # (hidden from the chat window by format_message).
                        st.session_state.messages.append({"role": "system", "content": f"Additional context from web scraping:{selected_contents}"})

                        # Store scraped contents in session state for the viewer expander.
                        st.session_state.scraped_contents = scraped_contents

                        # Set flag to show the scraped-content expander.
                        st.session_state.show_scraped_content = True

            # Identity/help questions are answered directly from the persona
            # prompt without calling the model.
            if "who are you" in prompt.lower() or "how can you help" in prompt.lower():
                # Respond based on the selected persona prompt.
                persona_response = settings['persona']
                st.session_state.messages.append({"role": "assistant", "content": persona_response})
                with st.chat_message("assistant"):
                    st.markdown(persona_response)
            else:
                # Append the selected persona prompt so the model adopts it.
                # NOTE(review): this appends a new system message on every
                # turn - the history accumulates duplicates; worth confirming
                # that is intended.
                selected_persona_prompt = settings['persona']
                st.session_state.messages.append({"role": "system", "content": selected_persona_prompt})

                # Generate and display assistant response.
                with st.chat_message("assistant"):
                    message_placeholder = st.empty()

                    # Generate response with continuation handling (the
                    # manager streams partial text into the placeholder).
                    full_response = response_manager.generate_response(
                        messages=st.session_state.messages,
                        temperature=settings['temperature'],
                        placeholder=message_placeholder
                    )

                    # Final update with the complete text.
                    message_placeholder.markdown(full_response)

                    # Add assistant response to history.
                    st.session_state.messages.append({"role": "assistant", "content": full_response})

                    # Display token count if enabled in the sidebar.
                    if settings['show_token_count']:
                        token_count = response_manager.count_tokens(full_response)
                        st.caption(f"Approximate tokens: {token_count}")

        # Display scraped content expander (outside the `if prompt` block so
        # it survives reruns after the prompt has been consumed).
        if st.session_state.get('show_scraped_content', False):
            with st.expander("View Scraped Content", expanded=False):
                if 'scraped_contents' in st.session_state and st.session_state.scraped_contents:
                    selected_url = st.selectbox("Choose URL to view content:", list(st.session_state.scraped_contents.keys()))
                    st.markdown(st.session_state.scraped_contents[selected_url])
                else:
                    st.write("No scraped content available.")
|
678 |
+
|
679 |
+
if __name__ == "__main__":
    main()

# Global CSS theme. Injected unconditionally on every script run so it also
# styles widgets created inside main() above.
st.markdown("""
<style>
/* Main container styling */
.main {
    background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
    padding: 2rem;
    border-radius: 20px;
    box-shadow: 0 8px 32px rgba(0,0,0,0.1);
}

/* Header styling */
.title-container {
    background: linear-gradient(45deg, #2193b0, #6dd5ed);
    padding: 2rem;
    border-radius: 15px;
    text-align: center;
    margin-bottom: 2rem;
    box-shadow: 0 4px 15px rgba(0,0,0,0.1);
}

.main-title {
    color: white;
    font-family: 'Poppins', sans-serif;
    font-size: 2.5rem;
    font-weight: 700;
    text-shadow: 2px 2px 4px rgba(0,0,0,0.2);
    margin-bottom: 1rem;
}

/* Chat container styling */
.chat-container {
    background: white;
    border-radius: 15px;
    padding: 1.5rem;
    margin: 1rem 0;
    box-shadow: 0 4px 6px rgba(0,0,0,0.05);
}

/* Message styling */
.stTextInput>div>div>input {
    border-radius: 25px !important;
    border: 2px solid #e0e0e0;
    padding: 1rem 1.5rem;
    font-size: 1rem;
    transition: all 0.3s ease;
}

.stTextInput>div>div>input:focus {
    border-color: #2193b0;
    box-shadow: 0 0 0 2px rgba(33, 147, 176, 0.2);
}

/* Button styling */
.stButton>button {
    background: linear-gradient(45deg, #2193b0, #6dd5ed);
    color: white;
    border: none;
    border-radius: 25px;
    padding: 0.75rem 2rem;
    font-weight: 600;
    transition: all 0.3s ease;
    box-shadow: 0 4px 15px rgba(0,0,0,0.1);
}

.stButton>button:hover {
    transform: translateY(-2px);
    box-shadow: 0 6px 20px rgba(0,0,0,0.15);
}

/* Sidebar styling */
/* NOTE(review): .css-1d391kg is an auto-generated Streamlit class name and
   may break across Streamlit versions - confirm against the deployed build. */
.css-1d391kg {
    background: linear-gradient(180deg, #f8f9fa 0%, #e9ecef 100%);
    padding: 2rem 1rem;
}

/* Feature cards */
.feature-card {
    background: white;
    border-radius: 15px;
    padding: 1.5rem;
    margin: 1rem 0;
    box-shadow: 0 4px 6px rgba(0,0,0,0.05);
    transition: all 0.3s ease;
}

.feature-card:hover {
    transform: translateY(-5px);
    box-shadow: 0 8px 15px rgba(0,0,0,0.1);
}

/* Creator section styling */
.creator-section {
    background: linear-gradient(45deg, #141e30, #243b55);
    color: white;
    padding: 2rem;
    border-radius: 15px;
    margin-top: 2rem;
    text-align: center;
}

.social-links a {
    color: #6dd5ed;
    text-decoration: none;
    margin: 0 1rem;
    transition: all 0.3s ease;
}

.social-links a:hover {
    color: white;
    text-decoration: none;
}

/* Animations */
@keyframes fadeIn {
    from { opacity: 0; transform: translateY(20px); }
    to { opacity: 1; transform: translateY(0); }
}

.animate-fade-in {
    animation: fadeIn 0.5s ease-out;
}
</style>
""", unsafe_allow_html=True)

# Modified welcome section (disabled; kept for reference)
# st.markdown("""
# <div class="title-container animate-fade-in">
#     <h1 class="main-title">β¨ Welcome to NVIDIA AI Chat Magic! β¨</h1>
#     <p style="color: white; font-size: 1.2rem;">Experience the future of AI conversation</p>
# </div>
#
# <div class="feature-card animate-fade-in">
#     <h2 style="color: #2193b0; margin-bottom: 1rem;">π Discover Our Amazing Features</h2>
#     <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 1rem;">
#         <div class="feature-item">
#             <h3>π€ AI Companions</h3>
#             <p>Engage with personalized AI assistants</p>
#         </div>
#         <div class="feature-item">
#             <h3>π Web Integration</h3>
#             <p>Access real-time web content</p>
#         </div>
#         <div class="feature-item">
#             <h3>πΌοΈ Image Analysis</h3>
#             <p>Intelligent image processing</p>
#         </div>
#         <div class="feature-item">
#             <h3>π¨ Creative Control</h3>
#             <p>Customize response styles</p>
#         </div>
#     </div>
# </div>
# """, unsafe_allow_html=True)

# "About the Creator" footer appended to the sidebar on every run.
st.sidebar.markdown("---")
st.sidebar.title("β¨ About the Creator")
st.sidebar.markdown("""
<div style="font-family: 'Brush Script MT', cursive; font-size: 20px; color: #4A90E2;">
Crafted with β€οΈ by Richardson Gunde
</div>

<div style="font-family: 'Dancing Script', cursive; font-size: 16px; padding: 10px 0;">
Featuring:
<br>β’ β¨ Custom AI Personas
<br>β’ π Web Content Integration
<br>β’ πΌοΈ Image Analysis
<br>β’ π¨ Creative Response Control
<br>β’ π Token Tracking
<br>β’ π Smart Conversations
</div>

<div style="font-family: 'Dancing Script', cursive; font-size: 16px; padding-top: 10px;">
π <a href="https://www.linkedin.com/in/richardson-gunde" style="color: #0077B5;">LinkedIn</a>
<br>π§ <a href="mailto:[email protected]" style="color: #D44638;">Email</a>
</div>

""", unsafe_allow_html=True)
|