# mouse-chat / app.py
import os
import re
import random
from http import HTTPStatus
from typing import Dict, List, Optional, Tuple
import base64
import anthropic
import openai
import asyncio
import time
from functools import partial
import json
import gradio as gr
import modelscope_studio.components.base as ms
import modelscope_studio.components.legacy as legacy
import modelscope_studio.components.antd as antd
import html
import urllib.parse
from huggingface_hub import HfApi, create_repo, hf_hub_download
import string
import requests
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import WebDriverException, TimeoutException
from PIL import Image
from io import BytesIO
from datetime import datetime
import spaces
from safetensors.torch import load_file
from diffusers import FluxPipeline
import torch
from os import path  # added for the cache path setup below
# Cache path configuration
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path
# Hugging Face token setup
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
print("Warning: HF_TOKEN not found in environment variables")
# FLUX model initialization
if not path.exists(cache_path):
os.makedirs(cache_path, exist_ok=True)
try:
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-dev",
torch_dtype=torch.bfloat16,
        use_auth_token=HF_TOKEN  # pass the Hugging Face token
)
pipe.load_lora_weights(
hf_hub_download(
"ByteDance/Hyper-SD",
"Hyper-FLUX.1-dev-8steps-lora.safetensors",
            token=HF_TOKEN  # pass the Hugging Face token
)
)
pipe.fuse_lora(lora_scale=0.125)
pipe.to(device="cuda", dtype=torch.bfloat16)
print("Successfully initialized FLUX model with authentication")
except Exception as e:
print(f"Error initializing FLUX model: {str(e)}")
pipe = None
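# If initialization fails (e.g. missing HF_TOKEN or no CUDA device), pipe stays None and
# generation_code() below skips image generation, embedding an inline error box in the
# generated HTML instead of aborting the text response.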
# Image generation helper
@spaces.GPU
def generate_image(prompt, height=512, width=512, steps=8, scales=3.5, seed=3413):
with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
return pipe(
prompt=[prompt],
generator=torch.Generator().manual_seed(int(seed)),
num_inference_steps=int(steps),
guidance_scale=float(scales),
height=int(height),
width=int(width),
max_sequence_length=256
).images[0]
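# Illustrative usage (a sketch, not wired into the app): generate_image returns a PIL.Image,
# so it can be saved or encoded directly, e.g.
#   img = generate_image("a watercolor fox in a forest", height=512, width=512, steps=8)
#   img.save("preview.png")
# The 8-step default matches the fused Hyper-FLUX.1-dev 8-step LoRA loaded above.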
# Define the SystemPrompt directly
SystemPrompt = """You are 'MOUSE-I', an advanced AI visualization expert. Your mission is to transform every response into a visually stunning and highly informative presentation.
Core Capabilities:
- Transform text responses into rich visual experiences
- Create interactive data visualizations and charts
- Design beautiful and intuitive user interfaces
- Utilize engaging animations and transitions
- Present information in a clear, structured manner
Visual Elements to Include:
- Charts & Graphs (using Chart.js, D3.js)
- Interactive Data Visualizations
- Modern UI Components
- Engaging Animations
- Informative Icons & Emojis
- Color-coded Information Blocks
- Progress Indicators
- Timeline Visualizations
- Statistical Representations
- Comparison Tables
Technical Requirements:
- Modern HTML5/CSS3/JavaScript
- Responsive Design
- Interactive Elements
- Clean Typography
- Professional Color Schemes
- Smooth Animations
- Cross-browser Compatibility
Libraries Available:
- Chart.js for Data Visualization
- D3.js for Complex Graphics
- Bootstrap for Layout
- jQuery for Interactions
- Three.js for 3D Elements
Design Principles:
- Visual Hierarchy
- Clear Information Flow
- Consistent Styling
- Intuitive Navigation
- Engaging User Experience
- Accessibility Compliance
Remember to:
- Present data in the most visually appealing way
- Use appropriate charts for different data types
- Include interactive elements where relevant
- Maintain a professional and modern aesthetic
- Ensure responsive design for all devices
Return only HTML code wrapped in code blocks, focusing on creating visually stunning and informative presentations.
"""
from config import DEMO_LIST
class Role:
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
History = List[Tuple[str, str]]
Messages = List[Dict[str, str]]
# ์ด๋ฏธ์ง€ ์บ์‹œ๋ฅผ ๋ฉ”๋ชจ๋ฆฌ์— ์ €์žฅ
IMAGE_CACHE = {}
# boost_prompt and handle_boost: prompt-enhancement helpers
def boost_prompt(prompt: str) -> str:
if not prompt:
return ""
# ์ฆ๊ฐ•์„ ์œ„ํ•œ ์‹œ์Šคํ…œ ํ”„๋กฌํ”„ํŠธ
boost_system_prompt = """
๋‹น์‹ ์€ ์›น ๊ฐœ๋ฐœ ํ”„๋กฌํ”„ํŠธ ์ „๋ฌธ๊ฐ€์ž…๋‹ˆ๋‹ค.
์ฃผ์–ด์ง„ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋ถ„์„ํ•˜์—ฌ ๋” ์ƒ์„ธํ•˜๊ณ  ์ „๋ฌธ์ ์ธ ์š”๊ตฌ์‚ฌํ•ญ์œผ๋กœ ํ™•์žฅํ•˜๋˜,
์›๋ž˜ ์˜๋„์™€ ๋ชฉ์ ์€ ๊ทธ๋Œ€๋กœ ์œ ์ง€ํ•˜๋ฉด์„œ ๋‹ค์Œ ๊ด€์ ๋“ค์„ ๊ณ ๋ คํ•˜์—ฌ ์ฆ๊ฐ•ํ•˜์‹ญ์‹œ์˜ค:
1. ๊ธฐ์ˆ ์  ๊ตฌํ˜„ ์ƒ์„ธ
2. UI/UX ๋””์ž์ธ ์š”์†Œ
3. ์‚ฌ์šฉ์ž ๊ฒฝํ—˜ ์ตœ์ ํ™”
4. ์„ฑ๋Šฅ๊ณผ ๋ณด์•ˆ
5. ์ ‘๊ทผ์„ฑ๊ณผ ํ˜ธํ™˜์„ฑ
๊ธฐ์กด SystemPrompt์˜ ๋ชจ๋“  ๊ทœ์น™์„ ์ค€์ˆ˜ํ•˜๋ฉด์„œ ์ฆ๊ฐ•๋œ ํ”„๋กฌํ”„ํŠธ๋ฅผ ์ƒ์„ฑํ•˜์‹ญ์‹œ์˜ค.
"""
try:
        # Try the Claude API first
try:
response = claude_client.messages.create(
model="claude-3-5-sonnet-20241022",
                max_tokens=2000,
                system=boost_system_prompt,  # pass the enhancement instructions to Claude as well
messages=[{
"role": "user",
"content": f"๋‹ค์Œ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋ถ„์„ํ•˜๊ณ  ์ฆ๊ฐ•ํ•˜์‹œ์˜ค: {prompt}"
}]
)
if hasattr(response, 'content') and len(response.content) > 0:
return response.content[0].text
raise Exception("Claude API ์‘๋‹ต ํ˜•์‹ ์˜ค๋ฅ˜")
except Exception as claude_error:
print(f"Claude API ์—๋Ÿฌ, OpenAI๋กœ ์ „ํ™˜: {str(claude_error)}")
# OpenAI API ์‹œ๋„
completion = openai_client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": boost_system_prompt},
{"role": "user", "content": f"๋‹ค์Œ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๋ถ„์„ํ•˜๊ณ  ์ฆ๊ฐ•ํ•˜์‹œ์˜ค: {prompt}"}
],
max_tokens=2000,
temperature=0.7
)
if completion.choices and len(completion.choices) > 0:
return completion.choices[0].message.content
raise Exception("OpenAI API ์‘๋‹ต ํ˜•์‹ ์˜ค๋ฅ˜")
except Exception as e:
print(f"ํ”„๋กฌํ”„ํŠธ ์ฆ๊ฐ• ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}")
return prompt # ์˜ค๋ฅ˜ ๋ฐœ์ƒ์‹œ ์›๋ณธ ํ”„๋กฌํ”„ํŠธ ๋ฐ˜ํ™˜
# Boost button event handler
def handle_boost(prompt: str):
try:
boosted_prompt = boost_prompt(prompt)
return boosted_prompt, gr.update(active_key="empty")
except Exception as e:
print(f"Boost ์ฒ˜๋ฆฌ ์ค‘ ์˜ค๋ฅ˜: {str(e)}")
return prompt, gr.update(active_key="empty")
def get_image_base64(image_path):
if image_path in IMAGE_CACHE:
return IMAGE_CACHE[image_path]
try:
with open(image_path, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
IMAGE_CACHE[image_path] = encoded_string
return encoded_string
    except Exception:
return IMAGE_CACHE.get('default.png', '')
def history_to_messages(history: History, system: str) -> Messages:
messages = [{'role': Role.SYSTEM, 'content': system}]
for h in history:
messages.append({'role': Role.USER, 'content': h[0]})
messages.append({'role': Role.ASSISTANT, 'content': h[1]})
return messages
def messages_to_history(messages: Messages) -> History:
assert messages[0]['role'] == Role.SYSTEM
history = []
for q, r in zip(messages[1::2], messages[2::2]):
history.append([q['content'], r['content']])
return history
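# Round-trip example (sketch): history_to_messages([("hi", "hello")], system="sys") yields
# [{'role': 'system', 'content': 'sys'}, {'role': 'user', 'content': 'hi'},
#  {'role': 'assistant', 'content': 'hello'}], and messages_to_history() on that list
# returns [['hi', 'hello']] again.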
# API ํด๋ผ์ด์–ธํŠธ ์ดˆ๊ธฐํ™”
YOUR_ANTHROPIC_TOKEN = os.getenv('ANTHROPIC_API_KEY', '') # ๊ธฐ๋ณธ๊ฐ’ ์ถ”๊ฐ€
YOUR_OPENAI_TOKEN = os.getenv('OPENAI_API_KEY', '') # ๊ธฐ๋ณธ๊ฐ’ ์ถ”๊ฐ€
# API ํ‚ค ๊ฒ€์ฆ
if not YOUR_ANTHROPIC_TOKEN or not YOUR_OPENAI_TOKEN:
print("Warning: API keys not found in environment variables")
# API ํด๋ผ์ด์–ธํŠธ ์ดˆ๊ธฐํ™” ์‹œ ์˜ˆ์™ธ ์ฒ˜๋ฆฌ ์ถ”๊ฐ€
try:
claude_client = anthropic.Anthropic(api_key=YOUR_ANTHROPIC_TOKEN)
openai_client = openai.OpenAI(api_key=YOUR_OPENAI_TOKEN)
except Exception as e:
print(f"Error initializing API clients: {str(e)}")
claude_client = None
openai_client = None
# try_claude_api: stream a Claude response with a manual timeout
async def try_claude_api(system_message, claude_messages, timeout=15):
try:
start_time = time.time()
with claude_client.messages.stream(
model="claude-3-5-sonnet-20241022",
max_tokens=7860,
system=system_message,
messages=claude_messages
) as stream:
collected_content = ""
for chunk in stream:
current_time = time.time()
if current_time - start_time > timeout:
print(f"Claude API response time: {current_time - start_time:.2f} seconds")
raise TimeoutError("Claude API timeout")
if chunk.type == "content_block_delta":
collected_content += chunk.delta.text
yield collected_content
await asyncio.sleep(0)
start_time = current_time
except Exception as e:
print(f"Claude API error: {str(e)}")
raise e
async def try_openai_api(openai_messages):
try:
stream = openai_client.chat.completions.create(
model="gpt-4o",
messages=openai_messages,
stream=True,
max_tokens=4096,
temperature=0.7
)
collected_content = ""
for chunk in stream:
if chunk.choices[0].delta.content is not None:
collected_content += chunk.choices[0].delta.content
yield collected_content
except Exception as e:
print(f"OpenAI API error: {str(e)}")
raise e
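# Both streaming helpers yield the accumulated text so far rather than individual deltas, so a
# caller can simply keep the last yielded value as the complete response. generation_code()
# below tries Claude first and falls back to OpenAI only when the Claude stream raises
# (including the manual timeout above).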
class Demo:
def __init__(self):
pass
async def generation_code(self, query: Optional[str], _setting: Dict[str, str]):
if not query or query.strip() == '':
query = get_random_placeholder()
# ์ด๋ฏธ์ง€ ์ƒ์„ฑ์ด ํ•„์š”ํ•œ์ง€ ํ™•์ธ
needs_image = '์ด๋ฏธ์ง€' in query or '๊ทธ๋ฆผ' in query or 'image' in query.lower()
image_prompt = None
        # Extract the image prompt
if needs_image:
for keyword in ['์ด๋ฏธ์ง€:', '๊ทธ๋ฆผ:', 'image:']:
if keyword in query.lower():
image_prompt = query.split(keyword)[1].strip()
break
if not image_prompt:
                image_prompt = query  # use the whole query when no explicit prompt is given
messages = [{'role': Role.SYSTEM, 'content': _setting['system']}]
messages.append({'role': Role.USER, 'content': query})
system_message = messages[0]['content']
claude_messages = [{"role": "user", "content": query}]
openai_messages = [
{"role": "system", "content": system_message},
{"role": "user", "content": query}
]
try:
yield [
"",
None,
gr.update(active_key="loading"),
gr.update(open=True)
]
await asyncio.sleep(0)
collected_content = None
try:
async for content in try_claude_api(system_message, claude_messages):
yield [
"",
None,
gr.update(active_key="loading"),
gr.update(open=True)
]
await asyncio.sleep(0)
collected_content = content
except Exception as claude_error:
print(f"Falling back to OpenAI API due to Claude error: {str(claude_error)}")
async for content in try_openai_api(openai_messages):
yield [
"",
None,
gr.update(active_key="loading"),
gr.update(open=True)
]
await asyncio.sleep(0)
collected_content = content
if collected_content:
# ์ด๋ฏธ์ง€ ์ƒ์„ฑ์ด ํ•„์š”ํ•œ ๊ฒฝ์šฐ
if needs_image and image_prompt:
try:
print(f"Generating image for prompt: {image_prompt}")
# FLUX ๋ชจ๋ธ์„ ์‚ฌ์šฉํ•˜์—ฌ ์ด๋ฏธ์ง€ ์ƒ์„ฑ
if pipe is not None:
image = generate_image(
prompt=image_prompt,
height=512,
width=512,
steps=8,
scales=3.5,
seed=random.randint(1, 10000)
)
                            # Encode the image as Base64
buffered = BytesIO()
image.save(buffered, format="PNG")
img_str = base64.b64encode(buffered.getvalue()).decode()
                            # Embed the image in the HTML
image_html = f'''
<div class="generated-image" style="margin: 20px 0; text-align: center;">
<h3 style="color: #333; margin-bottom: 10px;">Generated Image:</h3>
<img src="data:image/png;base64,{img_str}"
style="max-width: 100%;
border-radius: 10px;
box-shadow: 0 4px 8px rgba(0,0,0,0.1);">
<p style="color: #666; margin-top: 10px; font-style: italic;">
Prompt: {html.escape(image_prompt)}
</p>
</div>
'''
# HTML ์‘๋‹ต์— ์ด๋ฏธ์ง€ ์‚ฝ์ž…
if '```html' in collected_content:
# HTML ์ฝ”๋“œ ๋ธ”๋ก ๋‚ด๋ถ€์— ์ด๋ฏธ์ง€ ์ถ”๊ฐ€
collected_content = collected_content.replace('```html\n', f'```html\n{image_html}')
else:
# HTML ์ฝ”๋“œ ๋ธ”๋ก์œผ๋กœ ๊ฐ์‹ธ์„œ ์ด๋ฏธ์ง€ ์ถ”๊ฐ€
collected_content = f'```html\n{image_html}\n```\n{collected_content}'
print("Image generation successful")
else:
raise Exception("FLUX model not initialized")
except Exception as e:
print(f"Image generation error: {str(e)}")
error_message = f'''
<div style="color: #ff4d4f; padding: 10px; margin: 10px 0;
border-left: 4px solid #ff4d4f; background: #fff2f0;">
<p>Failed to generate image: {str(e)}</p>
</div>
'''
if '```html' in collected_content:
collected_content = collected_content.replace('```html\n', f'```html\n{error_message}')
else:
collected_content = f'```html\n{error_message}\n```\n{collected_content}'
                # Show the final result
yield [
collected_content,
send_to_sandbox(remove_code_block(collected_content)),
gr.update(active_key="render"),
gr.update(open=False)
]
else:
raise ValueError("No content was generated from either API")
except Exception as e:
print(f"Error details: {str(e)}")
raise ValueError(f'Error calling APIs: {str(e)}')
def clear_history(self):
return []
def remove_code_block(text):
pattern = r'```html\n(.+?)\n```'
match = re.search(pattern, text, re.DOTALL)
if match:
return match.group(1).strip()
else:
return text.strip()
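# Example (sketch): remove_code_block("```html\n<h1>Hi</h1>\n```") returns "<h1>Hi</h1>";
# text without a fenced ```html block is returned stripped but otherwise unchanged.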
def history_render(history: History):
return gr.update(open=True), history
def send_to_sandbox(code):
encoded_html = base64.b64encode(code.encode('utf-8')).decode('utf-8')
data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
return f"""
<iframe
src="{data_uri}"
style="width:100%; height:800px; border:none;"
frameborder="0"
></iframe>
"""
# ๋ฐฐํฌ ๊ด€๋ จ ํ•จ์ˆ˜ ์ถ”๊ฐ€
def generate_space_name():
"""6์ž๋ฆฌ ๋žœ๋ค ์˜๋ฌธ ์ด๋ฆ„ ์ƒ์„ฑ"""
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(6))
def deploy_to_vercel(code: str):
try:
token = "A8IFZmgW2cqA4yUNlLPnci0N"
if not token:
return "Vercel ํ† ํฐ์ด ์„ค์ •๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค."
# 6์ž๋ฆฌ ์˜๋ฌธ ํ”„๋กœ์ ํŠธ ์ด๋ฆ„ ์ƒ์„ฑ
project_name = ''.join(random.choice(string.ascii_lowercase) for i in range(6))
        # Vercel API endpoint
deploy_url = "https://api.vercel.com/v13/deployments"
# ํ—ค๋” ์„ค์ •
headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json"
}
        # Create the package.json file
package_json = {
"name": project_name,
"version": "1.0.0",
"private": True, # true -> True๋กœ ์ˆ˜์ •
"dependencies": {
"vite": "^5.0.0"
},
"scripts": {
"dev": "vite",
"build": "echo 'No build needed' && mkdir -p dist && cp index.html dist/",
"preview": "vite preview"
}
}
# ๋ฐฐํฌํ•  ํŒŒ์ผ ๋ฐ์ดํ„ฐ ๊ตฌ์กฐ
files = [
{
"file": "index.html",
"data": code
},
{
"file": "package.json",
"data": json.dumps(package_json, indent=2) # indent ์ถ”๊ฐ€๋กœ ๊ฐ€๋…์„ฑ ํ–ฅ์ƒ
}
]
# ํ”„๋กœ์ ํŠธ ์„ค์ •
project_settings = {
"buildCommand": "npm run build",
"outputDirectory": "dist",
"installCommand": "npm install",
"framework": None
}
# ๋ฐฐํฌ ์š”์ฒญ ๋ฐ์ดํ„ฐ
deploy_data = {
"name": project_name,
"files": files,
"target": "production",
"projectSettings": project_settings
}
deploy_response = requests.post(deploy_url, headers=headers, json=deploy_data)
if deploy_response.status_code != 200:
return f"๋ฐฐํฌ ์‹คํŒจ: {deploy_response.text}"
# URL ํ˜•์‹ ์ˆ˜์ • - 6์ž๋ฆฌ.vercel.app ํ˜•ํƒœ๋กœ ๋ฐ˜ํ™˜
deployment_url = f"{project_name}.vercel.app"
time.sleep(5)
return f"""๋ฐฐํฌ ์™„๋ฃŒ! <a href="https://{deployment_url}" target="_blank" style="color: #1890ff; text-decoration: underline; cursor: pointer;">https://{deployment_url}</a>"""
except Exception as e:
return f"๋ฐฐํฌ ์ค‘ ์˜ค๋ฅ˜ ๋ฐœ์ƒ: {str(e)}"
theme = gr.themes.Soft()
def get_random_placeholder():
return random.choice(DEMO_LIST)['description']
def update_placeholder():
return gr.update(placeholder=get_random_placeholder())
def create_main_interface():
"""๋ฉ”์ธ ์ธํ„ฐํŽ˜์ด์Šค ์ƒ์„ฑ ํ•จ์ˆ˜"""
def execute_code(query: str):
if not query or query.strip() == '':
return None, gr.update(active_key="empty")
try:
if '```html' in query and '```' in query:
code = remove_code_block(query)
else:
code = query.strip()
return send_to_sandbox(code), gr.update(active_key="render")
except Exception as e:
print(f"Error executing code: {str(e)}")
return None, gr.update(active_key="empty")
    # Apply the contents of the CSS file directly
with open('app.css', 'r', encoding='utf-8') as f:
custom_css = f.read()
demo = gr.Blocks(css=custom_css + """
.empty-content {
padding: 40px !important;
background: #f8f9fa !important;
border-radius: 10px !important;
margin: 20px !important;
}
.container {
background: #f0f0f0;
min-height: 100vh;
padding: 20px;
display: flex;
justify-content: center;
align-items: center;
font-family: -apple-system, BlinkMacSystemFont, sans-serif;
}
.app-window {
background: white;
border-radius: 10px;
box-shadow: 0 20px 40px rgba(0,0,0,0.1);
width: 100%;
max-width: 1400px;
overflow: hidden;
}
.window-header {
background: #f0f0f0;
padding: 12px 16px;
display: flex;
align-items: center;
border-bottom: 1px solid #e0e0e0;
}
.window-controls {
display: flex;
gap: 8px;
}
.control {
width: 12px;
height: 12px;
border-radius: 50%;
cursor: pointer;
}
.control.close { background: #ff5f57; }
.control.minimize { background: #febc2e; }
.control.maximize { background: #28c840; }
.window-title {
flex: 1;
text-align: center;
color: #333;
font-size: 14px;
font-weight: 500;
}
.main-content {
display: flex;
height: calc(100vh - 100px);
}
.left-panel {
width: 40%;
border-right: 1px solid #e0e0e0;
padding: 20px;
display: flex;
flex-direction: column;
}
.right-panel {
width: 60%;
background: #fff;
position: relative;
}
.input-area {
background: #f8f9fa;
border-radius: 10px;
padding: 20px;
margin-top: 20px;
}
.button-group {
display: flex;
gap: 10px;
margin-top: 20px;
}
.custom-button {
background: #007aff;
color: white;
border: none;
padding: 10px 20px;
border-radius: 6px;
cursor: pointer;
transition: all 0.2s;
}
.custom-button:hover {
background: #0056b3;
}
""", theme=theme)
with demo:
with gr.Tabs(elem_classes="main-tabs") as tabs:
with gr.Tab("Visual AI Assistant", elem_id="mouse-tab", elem_classes="mouse-tab"):
                # State holding the SystemPrompt setting
setting = gr.State({
"system": SystemPrompt,
})
with ms.Application() as app:
with antd.ConfigProvider():
with antd.Drawer(open=False, title="AI is Creating...", placement="left", width="750px") as code_drawer:
gr.HTML("""
<div class="thinking-container">
<style>
.custom-textarea {
background: #f8f9fa !important;
border: 1px solid #e0e0e0 !important;
border-radius: 10px !important;
padding: 15px !important;
min-height: 150px !important;
font-family: -apple-system, BlinkMacSystemFont, sans-serif !important;
}
.custom-textarea:focus {
border-color: #007aff !important;
box-shadow: 0 0 0 2px rgba(0,122,255,0.2) !important;
}
.thinking-container {
text-align: center;
padding: 20px;
background: #f8f9fa;
border-radius: 15px;
font-family: -apple-system, BlinkMacSystemFont, sans-serif;
}
.progress-bar {
width: 100%;
height: 4px;
background: #e9ecef;
border-radius: 4px;
margin: 20px 0;
overflow: hidden;
}
.progress-bar-inner {
width: 30%;
height: 100%;
background: linear-gradient(90deg, #007aff, #28c840);
animation: progress 2s ease-in-out infinite;
}
.thinking-icon {
font-size: 48px;
margin: 20px 0;
animation: bounce 1s ease infinite;
}
.tip-box {
background: white;
padding: 20px;
border-radius: 10px;
box-shadow: 0 4px 12px rgba(0,0,0,0.1);
margin: 20px 0;
transition: all 0.3s ease;
}
.status-text {
color: #007aff;
font-size: 18px;
margin: 15px 0;
animation: fade 1.5s ease infinite;
}
.icon-grid {
display: grid;
grid-template-columns: repeat(4, 1fr);
gap: 15px;
margin: 20px 0;
}
.icon-item {
padding: 10px;
background: rgba(0,122,255,0.1);
border-radius: 8px;
animation: pulse 2s ease infinite;
}
@keyframes progress {
0% { transform: translateX(-100%); }
100% { transform: translateX(400%); }
}
@keyframes bounce {
0%, 100% { transform: translateY(0); }
50% { transform: translateY(-10px); }
}
@keyframes fade {
0%, 100% { opacity: 1; }
50% { opacity: 0.6; }
}
@keyframes pulse {
0% { transform: scale(1); }
50% { transform: scale(1.05); }
100% { transform: scale(1); }
}
</style>
<div class="thinking-icon">๐ŸŽจ</div>
<div class="status-text">Creating Your Visualization...</div>
<div class="progress-bar">
<div class="progress-bar-inner"></div>
</div>
<div class="icon-grid">
<div class="icon-item">๐Ÿ“Š</div>
<div class="icon-item">๐ŸŽฏ</div>
<div class="icon-item">๐Ÿ’ก</div>
<div class="icon-item">โœจ</div>
</div>
<div class="tip-box">
<h3 style="color: #007aff; margin-bottom: 10px;">Did You Know?</h3>
<div id="tip-content" style="font-size: 16px; line-height: 1.6;"></div>
</div>
<script>
const tips = [
"MOUSE-I is creating responsive and interactive visualizations! ๐Ÿ“Š",
"We're applying modern design principles for the best user experience! ๐ŸŽจ",
"Your content will be optimized for all devices! ๐Ÿ“ฑ",
"Adding engaging animations to bring your data to life! โœจ",
"Crafting a beautiful presentation just for you! ๐ŸŽฏ",
"Implementing interactive elements for better engagement! ๐ŸŽฎ",
"Optimizing colors and layout for visual appeal! ๐ŸŽช",
"Creating smooth transitions and animations! ๐ŸŒŸ"
];
function updateTip() {
const tipElement = document.getElementById('tip-content');
if (tipElement) {
const randomTip = tips[Math.floor(Math.random() * tips.length)];
tipElement.innerHTML = randomTip;
tipElement.style.opacity = 0;
setTimeout(() => {
tipElement.style.transition = 'opacity 0.5s ease';
tipElement.style.opacity = 1;
}, 100);
}
}
updateTip();
setInterval(updateTip, 3000);
</script>
</div>
""")
code_output = legacy.Markdown(visible=False)
# ๋ฉ”์ธ ์ปจํ…์ธ ๋ฅผ ์œ„ํ•œ Row
with antd.Row(gutter=[32, 12]) as layout:
                            # Left panel
with antd.Col(span=24, md=8):
with antd.Flex(vertical=True, gap="middle", wrap=True):
                                    # macOS-style window header
header = gr.HTML("""
<div class="window-frame">
<div class="window-header">
<div class="window-controls">
<div class="control close"></div>
<div class="control minimize"></div>
<div class="control maximize"></div>
</div>
<div class="window-title">
<div class="window-address">
<div class="secure-icon">๐Ÿ”’</div>
<div class="url-bar">https://VIDraft-mouse-chat.hf.space</div>
</div>
</div>
</div>
<div class="app-content">
<img src="data:image/gif;base64,{}" width="360px" />
<h1 class="app-title">MOUSE-Chat Visual AI</h1>
<p class="app-description">Creates visualized web pages from text input, and when you include keywords like "image:", "๊ทธ๋ฆผ:", or "image:" in your input, it automatically generates AI images based on the description and incorporates them into the web page.
Use the "Generate" button for basic creation, "Enhance" button for prompt improvement, "Share" button to deploy results to the web, and input like "image: a dog playing in the park" to create results containing both text and generated images.</p>
</div>
</div>
""".format(get_image_base64('mouse.gif')))
                                    # Input area
input = antd.InputTextarea(
size="large",
allow_clear=True,
placeholder=get_random_placeholder(),
elem_classes="custom-textarea" # style ๋Œ€์‹  class ์‚ฌ์šฉ
)
                                    # Button group
with antd.Flex(gap="small", justify="flex-start"):
btn = antd.Button(
"Generate",
type="primary",
size="large",
elem_classes="generate-btn"
)
boost_btn = antd.Button(
"Enhance",
type="default",
size="large",
elem_classes="enhance-btn"
)
deploy_btn = antd.Button(
"Share",
type="default",
size="large",
elem_classes="share-btn"
)
deploy_result = gr.HTML(
label="Share Result",
elem_classes="deploy-result"
)
                            # Right panel
with antd.Col(span=24, md=16):
with ms.Div(elem_classes="right_panel"):
                                    # macOS-style window header
gr.HTML("""
<div class="window-frame">
<div class="window-header">
<div class="window-controls">
<div class="control close"></div>
<div class="control minimize"></div>
<div class="control maximize"></div>
</div>
<div class="window-title">Preview</div>
</div>
</div>
""")
                                    # Tab content
with antd.Tabs(active_key="empty", render_tab_bar="() => null") as state_tab:
with antd.Tabs.Item(key="empty"):
empty = antd.Empty(
description="Enter your question to begin",
elem_classes="right_content empty-content" # style ๋Œ€์‹  class ์‚ฌ์šฉ
)
with antd.Tabs.Item(key="loading"):
loading = antd.Spin(
True,
tip="Creating visual presentation...",
size="large",
elem_classes="right_content"
)
with antd.Tabs.Item(key="render"):
sandbox = gr.HTML(elem_classes="html_content")
btn.click(
demo_instance.generation_code,
                            inputs=[input, setting],  # setting is defined above
outputs=[code_output, sandbox, state_tab, code_drawer]
).then(
fn=update_placeholder,
inputs=[],
outputs=[input]
)
boost_btn.click(
fn=handle_boost,
inputs=[input],
outputs=[input, state_tab]
)
deploy_btn.click(
fn=lambda code: deploy_to_vercel(remove_code_block(code)) if code else "No code to share.",
inputs=[code_output],
outputs=[deploy_result]
)
gr.HTML("""
<style>
.generate-btn {
background: #007aff !important;
border-radius: 8px !important;
box-shadow: 0 2px 4px rgba(0,0,0,0.1) !important;
}
.enhance-btn {
border-radius: 8px !important;
border: 1px solid #007aff !important;
color: #007aff !important;
}
.share-btn {
border-radius: 8px !important;
border: 1px solid #28c840 !important;
color: #28c840 !important;
}
                    /* hover effects */
.generate-btn:hover {
background: #0056b3 !important;
}
.enhance-btn:hover {
background: rgba(0,122,255,0.1) !important;
}
.share-btn:hover {
background: rgba(40,200,64,0.1) !important;
}
.app-content {
padding: 20px;
text-align: center;
}
.app-title {
font-size: 24px;
color: #333;
margin: 20px 0 10px;
font-weight: 600;
}
.app-description {
color: #666;
font-size: 14px;
margin-bottom: 30px;
}
.deploy-result {
margin-top: 20px;
padding: 15px;
background: #f8f9fa;
border-radius: 8px;
font-family: -apple-system, BlinkMacSystemFont, sans-serif;
}
.deploy-result a {
color: #007aff;
text-decoration: none;
font-weight: 500;
}
.deploy-result a:hover {
text-decoration: underline;
}
/* ๋ฐ˜์‘ํ˜• ๋””์ž์ธ์„ ์œ„ํ•œ ๋ฏธ๋””์–ด ์ฟผ๋ฆฌ */
@media (max-width: 768px) {
.window-frame {
border-radius: 0;
}
.left-panel, .right-panel {
width: 100%;
}
.main-content {
flex-direction: column;
}
}
</style>
""")
return demo
# ๋ฉ”์ธ ์‹คํ–‰ ๋ถ€๋ถ„
if __name__ == "__main__":
try:
        demo_instance = Demo()  # create the Demo instance
        demo = create_main_interface()  # build the interface
demo.queue(
            default_concurrency_limit=20,  # use default_concurrency_limit instead of concurrency_count
status_update_rate=10,
api_open=False
).launch(
server_name="0.0.0.0",
server_port=7860,
share=False,
debug=False
)
except Exception as e:
print(f"Initialization error: {e}")
raise