import os
import re
import random
from http import HTTPStatus
from typing import Dict, List, Optional, Tuple
import base64
import anthropic
import openai
import asyncio
import time
from functools import partial
import json
import gradio as gr
import modelscope_studio.components.base as ms
import modelscope_studio.components.legacy as legacy
import modelscope_studio.components.antd as antd
import html
import urllib.parse
from huggingface_hub import HfApi, create_repo, hf_hub_download
import string
import requests
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import WebDriverException, TimeoutException
from PIL import Image
from io import BytesIO
from datetime import datetime
import spaces
from safetensors.torch import load_file
from diffusers import FluxPipeline
import torch
from os import path  # this line was added

# Cache path settings
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path

# Hugging Face token setup
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    print("Warning: HF_TOKEN not found in environment variables")

# FLUX model initialization
if not path.exists(cache_path):
    os.makedirs(cache_path, exist_ok=True)
try:
    pipe = FluxPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        torch_dtype=torch.bfloat16,
        token=HF_TOKEN  # authenticate with the Hugging Face token
    )
    pipe.load_lora_weights(
        hf_hub_download(
            "ByteDance/Hyper-SD",
            "Hyper-FLUX.1-dev-8steps-lora.safetensors",
            token=HF_TOKEN  # authenticate with the Hugging Face token
        )
    )
    pipe.fuse_lora(lora_scale=0.125)
    pipe.to(device="cuda", dtype=torch.bfloat16)
    print("Successfully initialized FLUX model with authentication")
except Exception as e:
    print(f"Error initializing FLUX model: {str(e)}")
    pipe = None
# Image generation helper
def generate_image(prompt, height=512, width=512, steps=8, scales=3.5, seed=3413):
    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
        return pipe(
            prompt=[prompt],
            generator=torch.Generator().manual_seed(int(seed)),
            num_inference_steps=int(steps),
            guidance_scale=float(scales),
            height=int(height),
            width=int(width),
            max_sequence_length=256
        ).images[0]
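# Usage sketch (assumes the FLUX pipeline above loaded successfully; the prompt
# below is illustrative only):
#   img = generate_image("a watercolor fox in a forest", steps=8, seed=42)
#   img.save("preview.png")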
# Define the SystemPrompt directly
SystemPrompt = """You are 'MOUSE-I', an advanced AI visualization expert. Your mission is to transform every response into a visually stunning and highly informative presentation.

Core Capabilities:
- Transform text responses into rich visual experiences
- Create interactive data visualizations and charts
- Design beautiful and intuitive user interfaces
- Utilize engaging animations and transitions
- Present information in a clear, structured manner

Visual Elements to Include:
- Charts & Graphs (using Chart.js, D3.js)
- Interactive Data Visualizations
- Modern UI Components
- Engaging Animations
- Informative Icons & Emojis
- Color-coded Information Blocks
- Progress Indicators
- Timeline Visualizations
- Statistical Representations
- Comparison Tables

Technical Requirements:
- Modern HTML5/CSS3/JavaScript
- Responsive Design
- Interactive Elements
- Clean Typography
- Professional Color Schemes
- Smooth Animations
- Cross-browser Compatibility

Libraries Available:
- Chart.js for Data Visualization
- D3.js for Complex Graphics
- Bootstrap for Layout
- jQuery for Interactions
- Three.js for 3D Elements

Design Principles:
- Visual Hierarchy
- Clear Information Flow
- Consistent Styling
- Intuitive Navigation
- Engaging User Experience
- Accessibility Compliance

Remember to:
- Present data in the most visually appealing way
- Use appropriate charts for different data types
- Include interactive elements where relevant
- Maintain a professional and modern aesthetic
- Ensure responsive design for all devices

Return only HTML code wrapped in code blocks, focusing on creating visually stunning and informative presentations.
"""
from config import DEMO_LIST

class Role:
    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"

History = List[Tuple[str, str]]
Messages = List[Dict[str, str]]

# In-memory image cache
IMAGE_CACHE = {}
# boost_prompt and its handle_boost wrapper
def boost_prompt(prompt: str) -> str:
    if not prompt:
        return ""

    # System prompt used for prompt enhancement
    boost_system_prompt = """
You are a web-development prompt expert.
Analyze the given prompt and expand it into more detailed, professional requirements.
Keep the original intent and purpose intact while enhancing it with the following aspects in mind:

1. Technical implementation details
2. UI/UX design elements
3. User experience optimization
4. Performance and security
5. Accessibility and compatibility

Produce the enhanced prompt while honoring every rule in the existing SystemPrompt.
"""
    try:
        # Try the Claude API first
        try:
            response = claude_client.messages.create(
                model="claude-3-5-sonnet-20241022",
                max_tokens=2000,
                system=boost_system_prompt,  # pass the enhancement instructions to Claude as well
                messages=[{
                    "role": "user",
                    "content": f"Analyze and enhance the following prompt: {prompt}"
                }]
            )
            if hasattr(response, 'content') and len(response.content) > 0:
                return response.content[0].text
            raise Exception("Unexpected Claude API response format")

        except Exception as claude_error:
            print(f"Claude API error, falling back to OpenAI: {str(claude_error)}")

            # Fall back to the OpenAI API
            completion = openai_client.chat.completions.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": boost_system_prompt},
                    {"role": "user", "content": f"Analyze and enhance the following prompt: {prompt}"}
                ],
                max_tokens=2000,
                temperature=0.7
            )
            if completion.choices and len(completion.choices) > 0:
                return completion.choices[0].message.content
            raise Exception("Unexpected OpenAI API response format")

    except Exception as e:
        print(f"Error while enhancing the prompt: {str(e)}")
        return prompt  # fall back to the original prompt on error
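# Rough usage sketch (requires the API clients configured further below; the
# example prompt is illustrative only):
#   richer = boost_prompt("Build a dashboard for weekly sales")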
# Boost button event handler
def handle_boost(prompt: str):
    try:
        boosted_prompt = boost_prompt(prompt)
        return boosted_prompt, gr.update(active_key="empty")
    except Exception as e:
        print(f"Error while handling boost: {str(e)}")
        return prompt, gr.update(active_key="empty")
def get_image_base64(image_path):
    if image_path in IMAGE_CACHE:
        return IMAGE_CACHE[image_path]
    try:
        with open(image_path, "rb") as image_file:
            encoded_string = base64.b64encode(image_file.read()).decode()
            IMAGE_CACHE[image_path] = encoded_string
            return encoded_string
    except:
        return IMAGE_CACHE.get('default.png', '')
def history_to_messages(history: History, system: str) -> Messages:
    messages = [{'role': Role.SYSTEM, 'content': system}]
    for h in history:
        messages.append({'role': Role.USER, 'content': h[0]})
        messages.append({'role': Role.ASSISTANT, 'content': h[1]})
    return messages

def messages_to_history(messages: Messages) -> History:
    assert messages[0]['role'] == Role.SYSTEM
    history = []
    for q, r in zip(messages[1::2], messages[2::2]):
        history.append([q['content'], r['content']])
    return history
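# Round-trip sketch: history_to_messages prepends the system prompt, and
# messages_to_history drops it and re-pairs user/assistant turns, e.g.
#   msgs = history_to_messages([("hi", "hello")], SystemPrompt)
#   assert messages_to_history(msgs) == [["hi", "hello"]]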
# API client initialization
YOUR_ANTHROPIC_TOKEN = os.getenv('ANTHROPIC_API_KEY', '')  # empty-string default
YOUR_OPENAI_TOKEN = os.getenv('OPENAI_API_KEY', '')  # empty-string default

# API key validation
if not YOUR_ANTHROPIC_TOKEN or not YOUR_OPENAI_TOKEN:
    print("Warning: API keys not found in environment variables")

# Wrap client construction in exception handling
try:
    claude_client = anthropic.Anthropic(api_key=YOUR_ANTHROPIC_TOKEN)
    openai_client = openai.OpenAI(api_key=YOUR_OPENAI_TOKEN)
except Exception as e:
    print(f"Error initializing API clients: {str(e)}")
    claude_client = None
    openai_client = None
# try_claude_api: stream a response from the Claude API
async def try_claude_api(system_message, claude_messages, timeout=15):
    try:
        start_time = time.time()
        with claude_client.messages.stream(
            model="claude-3-5-sonnet-20241022",
            max_tokens=7860,
            system=system_message,
            messages=claude_messages
        ) as stream:
            collected_content = ""
            for chunk in stream:
                current_time = time.time()
                if current_time - start_time > timeout:
                    print(f"Claude API response time: {current_time - start_time:.2f} seconds")
                    raise TimeoutError("Claude API timeout")
                if chunk.type == "content_block_delta":
                    collected_content += chunk.delta.text
                    yield collected_content
                    await asyncio.sleep(0)
                start_time = current_time  # reset the timeout window after each event
    except Exception as e:
        print(f"Claude API error: {str(e)}")
        raise e
async def try_openai_api(openai_messages):
    try:
        stream = openai_client.chat.completions.create(
            model="gpt-4o",
            messages=openai_messages,
            stream=True,
            max_tokens=4096,
            temperature=0.7
        )
        collected_content = ""
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                collected_content += chunk.choices[0].delta.content
                yield collected_content
    except Exception as e:
        print(f"OpenAI API error: {str(e)}")
        raise e
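# Both helpers are async generators that yield the accumulated text so far;
# consume them from an async context, for example:
#   async for partial in try_openai_api(openai_messages):
#       print(len(partial))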
class Demo:
    def __init__(self):
        pass

    async def generation_code(self, query: Optional[str], _setting: Dict[str, str]):
        if not query or query.strip() == '':
            query = get_random_placeholder()

        # Check whether image generation is requested
        needs_image = '이미지' in query or '그림' in query or 'image' in query.lower()
        image_prompt = None

        # Extract the image prompt
        if needs_image:
            for keyword in ['이미지:', '그림:', 'image:']:
                if keyword in query.lower():
                    image_prompt = query.split(keyword)[1].strip()
                    break
            if not image_prompt:
                image_prompt = query  # use the whole query if no explicit prompt is given

        messages = [{'role': Role.SYSTEM, 'content': _setting['system']}]
        messages.append({'role': Role.USER, 'content': query})
        system_message = messages[0]['content']

        claude_messages = [{"role": "user", "content": query}]
        openai_messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": query}
        ]
        try:
            yield [
                "",
                None,
                gr.update(active_key="loading"),
                gr.update(open=True)
            ]
            await asyncio.sleep(0)

            collected_content = None
            try:
                async for content in try_claude_api(system_message, claude_messages):
                    yield [
                        "",
                        None,
                        gr.update(active_key="loading"),
                        gr.update(open=True)
                    ]
                    await asyncio.sleep(0)
                    collected_content = content
            except Exception as claude_error:
                print(f"Falling back to OpenAI API due to Claude error: {str(claude_error)}")
                async for content in try_openai_api(openai_messages):
                    yield [
                        "",
                        None,
                        gr.update(active_key="loading"),
                        gr.update(open=True)
                    ]
                    await asyncio.sleep(0)
                    collected_content = content
            if collected_content:
                # If image generation was requested
                if needs_image and image_prompt:
                    try:
                        print(f"Generating image for prompt: {image_prompt}")
                        # Generate the image with the FLUX model
                        if pipe is not None:
                            image = generate_image(
                                prompt=image_prompt,
                                height=512,
                                width=512,
                                steps=8,
                                scales=3.5,
                                seed=random.randint(1, 10000)
                            )

                            # Encode the image as Base64
                            buffered = BytesIO()
                            image.save(buffered, format="PNG")
                            img_str = base64.b64encode(buffered.getvalue()).decode()

                            # Build the HTML snippet for the image
                            image_html = f'''
                            <div class="generated-image" style="margin: 20px 0; text-align: center;">
                                <h3 style="color: #333; margin-bottom: 10px;">Generated Image:</h3>
                                <img src="data:image/png;base64,{img_str}"
                                     style="max-width: 100%;
                                            border-radius: 10px;
                                            box-shadow: 0 4px 8px rgba(0,0,0,0.1);">
                                <p style="color: #666; margin-top: 10px; font-style: italic;">
                                    Prompt: {html.escape(image_prompt)}
                                </p>
                            </div>
                            '''

                            # Insert the image into the HTML response
                            if '```html' in collected_content:
                                # Add the image inside the existing HTML code block
                                collected_content = collected_content.replace('```html\n', f'```html\n{image_html}')
                            else:
                                # Wrap the image in its own HTML code block and prepend it
                                collected_content = f'```html\n{image_html}\n```\n{collected_content}'
                            print("Image generation successful")
                        else:
                            raise Exception("FLUX model not initialized")
                    except Exception as e:
                        print(f"Image generation error: {str(e)}")
                        error_message = f'''
                        <div style="color: #ff4d4f; padding: 10px; margin: 10px 0;
                                    border-left: 4px solid #ff4d4f; background: #fff2f0;">
                            <p>Failed to generate image: {str(e)}</p>
                        </div>
                        '''
                        if '```html' in collected_content:
                            collected_content = collected_content.replace('```html\n', f'```html\n{error_message}')
                        else:
                            collected_content = f'```html\n{error_message}\n```\n{collected_content}'

                # Display the final result
                yield [
                    collected_content,
                    send_to_sandbox(remove_code_block(collected_content)),
                    gr.update(active_key="render"),
                    gr.update(open=False)
                ]
            else:
                raise ValueError("No content was generated from either API")

        except Exception as e:
            print(f"Error details: {str(e)}")
            raise ValueError(f'Error calling APIs: {str(e)}')
    def clear_history(self):
        return []

def remove_code_block(text):
    pattern = r'```html\n(.+?)\n```'
    match = re.search(pattern, text, re.DOTALL)
    if match:
        return match.group(1).strip()
    else:
        return text.strip()
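# Example: remove_code_block("```html\n<h1>Hi</h1>\n```") returns "<h1>Hi</h1>";
# text without a fenced html block is returned stripped but otherwise unchanged.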
def history_render(history: History):
    return gr.update(open=True), history

def send_to_sandbox(code):
    encoded_html = base64.b64encode(code.encode('utf-8')).decode('utf-8')
    data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
    return f"""
        <iframe
            src="{data_uri}"
            style="width:100%; height:800px; border:none;"
            frameborder="0"
        ></iframe>
    """
# Deployment helpers
def generate_space_name():
    """Generate a random 6-letter lowercase name."""
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(6))

def deploy_to_vercel(code: str):
    try:
        # Read the Vercel token from the environment (env var name assumed) instead of hard-coding it
        token = os.getenv("VERCEL_TOKEN", "")
        if not token:
            return "The Vercel token is not configured."

        # Generate a 6-letter lowercase project name
        project_name = ''.join(random.choice(string.ascii_lowercase) for i in range(6))

        # Vercel API endpoint
        deploy_url = "https://api.vercel.com/v13/deployments"

        # Request headers
        headers = {
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json"
        }

        # package.json contents
        package_json = {
            "name": project_name,
            "version": "1.0.0",
            "private": True,  # fixed: true -> True
            "dependencies": {
                "vite": "^5.0.0"
            },
            "scripts": {
                "dev": "vite",
                "build": "echo 'No build needed' && mkdir -p dist && cp index.html dist/",
                "preview": "vite preview"
            }
        }

        # Files to deploy
        files = [
            {
                "file": "index.html",
                "data": code
            },
            {
                "file": "package.json",
                "data": json.dumps(package_json, indent=2)  # indent added for readability
            }
        ]

        # Project settings
        project_settings = {
            "buildCommand": "npm run build",
            "outputDirectory": "dist",
            "installCommand": "npm install",
            "framework": None
        }

        # Deployment request payload
        deploy_data = {
            "name": project_name,
            "files": files,
            "target": "production",
            "projectSettings": project_settings
        }

        deploy_response = requests.post(deploy_url, headers=headers, json=deploy_data)

        if deploy_response.status_code != 200:
            return f"Deployment failed: {deploy_response.text}"

        # Return the URL in the form <6 letters>.vercel.app
        deployment_url = f"{project_name}.vercel.app"
        time.sleep(5)

        return f"""Deployment complete! <a href="https://{deployment_url}" target="_blank" style="color: #1890ff; text-decoration: underline; cursor: pointer;">https://{deployment_url}</a>"""

    except Exception as e:
        return f"Error during deployment: {str(e)}"
theme = gr.themes.Soft()

def get_random_placeholder():
    return random.choice(DEMO_LIST)['description']

def update_placeholder():
    return gr.update(placeholder=get_random_placeholder())

def create_main_interface():
    """Build the main interface."""
    def execute_code(query: str):
        if not query or query.strip() == '':
            return None, gr.update(active_key="empty")
        try:
            if '```html' in query and '```' in query:
                code = remove_code_block(query)
            else:
                code = query.strip()
            return send_to_sandbox(code), gr.update(active_key="render")
        except Exception as e:
            print(f"Error executing code: {str(e)}")
            return None, gr.update(active_key="empty")

    # Apply the CSS file contents directly
    with open('app.css', 'r', encoding='utf-8') as f:
        custom_css = f.read()
    demo = gr.Blocks(css=custom_css + """
        .empty-content {
            padding: 40px !important;
            background: #f8f9fa !important;
            border-radius: 10px !important;
            margin: 20px !important;
        }
        .container {
            background: #f0f0f0;
            min-height: 100vh;
            padding: 20px;
            display: flex;
            justify-content: center;
            align-items: center;
            font-family: -apple-system, BlinkMacSystemFont, sans-serif;
        }
        .app-window {
            background: white;
            border-radius: 10px;
            box-shadow: 0 20px 40px rgba(0,0,0,0.1);
            width: 100%;
            max-width: 1400px;
            overflow: hidden;
        }
        .window-header {
            background: #f0f0f0;
            padding: 12px 16px;
            display: flex;
            align-items: center;
            border-bottom: 1px solid #e0e0e0;
        }
        .window-controls {
            display: flex;
            gap: 8px;
        }
        .control {
            width: 12px;
            height: 12px;
            border-radius: 50%;
            cursor: pointer;
        }
        .control.close { background: #ff5f57; }
        .control.minimize { background: #febc2e; }
        .control.maximize { background: #28c840; }
        .window-title {
            flex: 1;
            text-align: center;
            color: #333;
            font-size: 14px;
            font-weight: 500;
        }
        .main-content {
            display: flex;
            height: calc(100vh - 100px);
        }
        .left-panel {
            width: 40%;
            border-right: 1px solid #e0e0e0;
            padding: 20px;
            display: flex;
            flex-direction: column;
        }
        .right-panel {
            width: 60%;
            background: #fff;
            position: relative;
        }
        .input-area {
            background: #f8f9fa;
            border-radius: 10px;
            padding: 20px;
            margin-top: 20px;
        }
        .button-group {
            display: flex;
            gap: 10px;
            margin-top: 20px;
        }
        .custom-button {
            background: #007aff;
            color: white;
            border: none;
            padding: 10px 20px;
            border-radius: 6px;
            cursor: pointer;
            transition: all 0.2s;
        }
        .custom-button:hover {
            background: #0056b3;
        }
    """, theme=theme)
    with demo:
        with gr.Tabs(elem_classes="main-tabs") as tabs:
            with gr.Tab("Visual AI Assistant", elem_id="mouse-tab", elem_classes="mouse-tab"):
                # State holding the SystemPrompt setting
                setting = gr.State({
                    "system": SystemPrompt,
                })

                with ms.Application() as app:
                    with antd.ConfigProvider():
                        with antd.Drawer(open=False, title="AI is Creating...", placement="left", width="750px") as code_drawer:
                            gr.HTML("""
                                <div class="thinking-container">
                                    <style>
                                        .custom-textarea {
                                            background: #f8f9fa !important;
                                            border: 1px solid #e0e0e0 !important;
                                            border-radius: 10px !important;
                                            padding: 15px !important;
                                            min-height: 150px !important;
                                            font-family: -apple-system, BlinkMacSystemFont, sans-serif !important;
                                        }
                                        .custom-textarea:focus {
                                            border-color: #007aff !important;
                                            box-shadow: 0 0 0 2px rgba(0,122,255,0.2) !important;
                                        }
                                        .thinking-container {
                                            text-align: center;
                                            padding: 20px;
                                            background: #f8f9fa;
                                            border-radius: 15px;
                                            font-family: -apple-system, BlinkMacSystemFont, sans-serif;
                                        }
                                        .progress-bar {
                                            width: 100%;
                                            height: 4px;
                                            background: #e9ecef;
                                            border-radius: 4px;
                                            margin: 20px 0;
                                            overflow: hidden;
                                        }
                                        .progress-bar-inner {
                                            width: 30%;
                                            height: 100%;
                                            background: linear-gradient(90deg, #007aff, #28c840);
                                            animation: progress 2s ease-in-out infinite;
                                        }
                                        .thinking-icon {
                                            font-size: 48px;
                                            margin: 20px 0;
                                            animation: bounce 1s ease infinite;
                                        }
                                        .tip-box {
                                            background: white;
                                            padding: 20px;
                                            border-radius: 10px;
                                            box-shadow: 0 4px 12px rgba(0,0,0,0.1);
                                            margin: 20px 0;
                                            transition: all 0.3s ease;
                                        }
                                        .status-text {
                                            color: #007aff;
                                            font-size: 18px;
                                            margin: 15px 0;
                                            animation: fade 1.5s ease infinite;
                                        }
                                        .icon-grid {
                                            display: grid;
                                            grid-template-columns: repeat(4, 1fr);
                                            gap: 15px;
                                            margin: 20px 0;
                                        }
                                        .icon-item {
                                            padding: 10px;
                                            background: rgba(0,122,255,0.1);
                                            border-radius: 8px;
                                            animation: pulse 2s ease infinite;
                                        }
                                        @keyframes progress {
                                            0% { transform: translateX(-100%); }
                                            100% { transform: translateX(400%); }
                                        }
                                        @keyframes bounce {
                                            0%, 100% { transform: translateY(0); }
                                            50% { transform: translateY(-10px); }
                                        }
                                        @keyframes fade {
                                            0%, 100% { opacity: 1; }
                                            50% { opacity: 0.6; }
                                        }
                                        @keyframes pulse {
                                            0% { transform: scale(1); }
                                            50% { transform: scale(1.05); }
                                            100% { transform: scale(1); }
                                        }
                                    </style>
                                    <div class="thinking-icon">🎨</div>
                                    <div class="status-text">Creating Your Visualization...</div>
                                    <div class="progress-bar">
                                        <div class="progress-bar-inner"></div>
                                    </div>
                                    <div class="icon-grid">
                                        <div class="icon-item">📊</div>
                                        <div class="icon-item">🎯</div>
                                        <div class="icon-item">💡</div>
                                        <div class="icon-item">✨</div>
                                    </div>
                                    <div class="tip-box">
                                        <h3 style="color: #007aff; margin-bottom: 10px;">Did You Know?</h3>
                                        <div id="tip-content" style="font-size: 16px; line-height: 1.6;"></div>
                                    </div>
                                    <script>
                                        const tips = [
                                            "MOUSE-I is creating responsive and interactive visualizations! 📊",
                                            "We're applying modern design principles for the best user experience! 🎨",
                                            "Your content will be optimized for all devices! 📱",
                                            "Adding engaging animations to bring your data to life! ✨",
                                            "Crafting a beautiful presentation just for you! 🎯",
                                            "Implementing interactive elements for better engagement! 🎮",
                                            "Optimizing colors and layout for visual appeal! 💪",
                                            "Creating smooth transitions and animations! 🌟"
                                        ];
                                        function updateTip() {
                                            const tipElement = document.getElementById('tip-content');
                                            if (tipElement) {
                                                const randomTip = tips[Math.floor(Math.random() * tips.length)];
                                                tipElement.innerHTML = randomTip;
                                                tipElement.style.opacity = 0;
                                                setTimeout(() => {
                                                    tipElement.style.transition = 'opacity 0.5s ease';
                                                    tipElement.style.opacity = 1;
                                                }, 100);
                                            }
                                        }
                                        updateTip();
                                        setInterval(updateTip, 3000);
                                    </script>
                                </div>
                            """)
                            code_output = legacy.Markdown(visible=False)

                        # Row for the main content
                        with antd.Row(gutter=[32, 12]) as layout:
                            # Left panel
                            with antd.Col(span=24, md=8):
                                with antd.Flex(vertical=True, gap="middle", wrap=True):
                                    # macOS-style window header
                                    header = gr.HTML("""
                                        <div class="window-frame">
                                            <div class="window-header">
                                                <div class="window-controls">
                                                    <div class="control close"></div>
                                                    <div class="control minimize"></div>
                                                    <div class="control maximize"></div>
                                                </div>
                                                <div class="window-title">
                                                    <div class="window-address">
                                                        <div class="secure-icon">🔒</div>
                                                        <div class="url-bar">https://VIDraft-mouse-chat.hf.space</div>
                                                    </div>
                                                </div>
                                            </div>
                                            <div class="app-content">
                                                <img src="data:image/gif;base64,{}" width="360px" />
                                                <h1 class="app-title">MOUSE-Chat Visual AI</h1>
                                                <p class="app-description">Creates visualized web pages from text input. When you include keywords such as "image:", "이미지:", or "그림:" in your input, it automatically generates AI images based on the description and incorporates them into the web page.
                                                Use the "Generate" button for basic creation, the "Enhance" button for prompt improvement, and the "Share" button to deploy results to the web. Input like "image: a dog playing in the park" produces a result containing both text and a generated image.</p>
                                            </div>
                                        </div>
                                    """.format(get_image_base64('mouse.gif')))

                                    # Input area
                                    input = antd.InputTextarea(
                                        size="large",
                                        allow_clear=True,
                                        placeholder=get_random_placeholder(),
                                        elem_classes="custom-textarea"  # use a class instead of inline style
                                    )

                                    # Button group
                                    with antd.Flex(gap="small", justify="flex-start"):
                                        btn = antd.Button(
                                            "Generate",
                                            type="primary",
                                            size="large",
                                            elem_classes="generate-btn"
                                        )
                                        boost_btn = antd.Button(
                                            "Enhance",
                                            type="default",
                                            size="large",
                                            elem_classes="enhance-btn"
                                        )
                                        deploy_btn = antd.Button(
                                            "Share",
                                            type="default",
                                            size="large",
                                            elem_classes="share-btn"
                                        )

                                    deploy_result = gr.HTML(
                                        label="Share Result",
                                        elem_classes="deploy-result"
                                    )
                            # Right panel
                            with antd.Col(span=24, md=16):
                                with ms.Div(elem_classes="right_panel"):
                                    # macOS-style window header
                                    gr.HTML("""
                                        <div class="window-frame">
                                            <div class="window-header">
                                                <div class="window-controls">
                                                    <div class="control close"></div>
                                                    <div class="control minimize"></div>
                                                    <div class="control maximize"></div>
                                                </div>
                                                <div class="window-title">Preview</div>
                                            </div>
                                        </div>
                                    """)

                                    # Tab content
                                    with antd.Tabs(active_key="empty", render_tab_bar="() => null") as state_tab:
                                        with antd.Tabs.Item(key="empty"):
                                            empty = antd.Empty(
                                                description="Enter your question to begin",
                                                elem_classes="right_content empty-content"  # use a class instead of inline style
                                            )
                                        with antd.Tabs.Item(key="loading"):
                                            loading = antd.Spin(
                                                True,
                                                tip="Creating visual presentation...",
                                                size="large",
                                                elem_classes="right_content"
                                            )
                                        with antd.Tabs.Item(key="render"):
                                            sandbox = gr.HTML(elem_classes="html_content")
                        btn.click(
                            demo_instance.generation_code,
                            inputs=[input, setting],  # setting is defined above
                            outputs=[code_output, sandbox, state_tab, code_drawer]
                        ).then(
                            fn=update_placeholder,
                            inputs=[],
                            outputs=[input]
                        )

                        boost_btn.click(
                            fn=handle_boost,
                            inputs=[input],
                            outputs=[input, state_tab]
                        )

                        deploy_btn.click(
                            fn=lambda code: deploy_to_vercel(remove_code_block(code)) if code else "No code to share.",
                            inputs=[code_output],
                            outputs=[deploy_result]
                        )
                        gr.HTML("""
                            <style>
                                .generate-btn {
                                    background: #007aff !important;
                                    border-radius: 8px !important;
                                    box-shadow: 0 2px 4px rgba(0,0,0,0.1) !important;
                                }
                                .enhance-btn {
                                    border-radius: 8px !important;
                                    border: 1px solid #007aff !important;
                                    color: #007aff !important;
                                }
                                .share-btn {
                                    border-radius: 8px !important;
                                    border: 1px solid #28c840 !important;
                                    color: #28c840 !important;
                                }
                                /* hover effects */
                                .generate-btn:hover {
                                    background: #0056b3 !important;
                                }
                                .enhance-btn:hover {
                                    background: rgba(0,122,255,0.1) !important;
                                }
                                .share-btn:hover {
                                    background: rgba(40,200,64,0.1) !important;
                                }
                                .app-content {
                                    padding: 20px;
                                    text-align: center;
                                }
                                .app-title {
                                    font-size: 24px;
                                    color: #333;
                                    margin: 20px 0 10px;
                                    font-weight: 600;
                                }
                                .app-description {
                                    color: #666;
                                    font-size: 14px;
                                    margin-bottom: 30px;
                                }
                                .deploy-result {
                                    margin-top: 20px;
                                    padding: 15px;
                                    background: #f8f9fa;
                                    border-radius: 8px;
                                    font-family: -apple-system, BlinkMacSystemFont, sans-serif;
                                }
                                .deploy-result a {
                                    color: #007aff;
                                    text-decoration: none;
                                    font-weight: 500;
                                }
                                .deploy-result a:hover {
                                    text-decoration: underline;
                                }
                                /* media query for responsive design */
                                @media (max-width: 768px) {
                                    .window-frame {
                                        border-radius: 0;
                                    }
                                    .left-panel, .right-panel {
                                        width: 100%;
                                    }
                                    .main-content {
                                        flex-direction: column;
                                    }
                                }
                            </style>
                        """)

    return demo
# Main entry point
if __name__ == "__main__":
    try:
        demo_instance = Demo()  # create the Demo instance
        demo = create_main_interface()  # build the interface
        demo.queue(
            default_concurrency_limit=20,  # replaces the removed concurrency_count argument
            status_update_rate=10,
            api_open=False
        ).launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,
            debug=False
        )
    except Exception as e:
        print(f"Initialization error: {e}")
        raise