Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -4,14 +4,27 @@ import discord
|
|
4 |
from discord.ext import commands
|
5 |
import torch
|
6 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
MODEL = "LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct"
|
9 |
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
|
10 |
DISCORD_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))
|
11 |
|
|
|
|
|
|
|
12 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
13 |
|
|
|
14 |
tokenizer = AutoTokenizer.from_pretrained(MODEL)
|
|
|
|
|
|
|
15 |
model = AutoModelForCausalLM.from_pretrained(
|
16 |
MODEL,
|
17 |
torch_dtype=torch.bfloat16,
|
@@ -19,12 +32,14 @@ model = AutoModelForCausalLM.from_pretrained(
|
|
19 |
trust_remote_code=True,
|
20 |
ignore_mismatched_sizes=True
|
21 |
)
|
|
|
22 |
|
23 |
intents = discord.Intents.default()
|
24 |
intents.message_content = True
|
25 |
bot = commands.Bot(command_prefix="!", intents=intents)
|
26 |
|
27 |
async def generate_response(message, history, system_prompt):
|
|
|
28 |
conversation = [{"role": "system", "content": system_prompt}]
|
29 |
for prompt, answer in history:
|
30 |
conversation.extend([
|
@@ -53,29 +68,39 @@ async def generate_response(message, history, system_prompt):
|
|
53 |
)
|
54 |
|
55 |
response = tokenizer.decode(output[0], skip_special_tokens=True)
|
|
|
56 |
return response.split("Assistant:")[-1].strip()
|
57 |
|
58 |
@bot.event
|
59 |
async def on_ready():
|
60 |
-
|
61 |
|
62 |
@bot.event
|
63 |
async def on_message(message):
|
64 |
if message.author == bot.user:
|
65 |
return
|
66 |
|
|
|
|
|
|
|
67 |
if message.channel.id != DISCORD_CHANNEL_ID:
|
|
|
68 |
return
|
69 |
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
|
|
|
|
|
|
77 |
|
78 |
if __name__ == "__main__":
|
79 |
import subprocess
|
|
|
80 |
subprocess.Popen(["python", "web.py"])
|
|
|
81 |
bot.run(DISCORD_TOKEN)
|
|
|
4 |
from discord.ext import commands
|
5 |
import torch
|
6 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
7 |
+
import logging

# Logging setup: timestamped INFO-level records for startup progress and
# per-message handling (original comment translated from Korean).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
|
12 |
|
13 |
# Hugging Face model id and Discord credentials (read from the environment).
MODEL = "LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct"
DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
# NOTE(review): int(os.getenv(...)) raises TypeError if DISCORD_CHANNEL_ID is
# unset, so the "Not Set"-style logging below never runs in that case — confirm
# the env var is always provided by the deployment.
DISCORD_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))

# Report configuration state without leaking the token value itself.
logger.info(f"Discord Token: {'Set' if DISCORD_TOKEN else 'Not Set'}")
logger.info(f"Discord Channel ID: {DISCORD_CHANNEL_ID}")
|
19 |
+
|
20 |
# Prefer GPU when available. NOTE(review): how the model is actually placed on
# this device is not visible in this hunk — confirm against the full file.
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {device}")

logger.info("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(MODEL)  # downloads from the Hub on first run
logger.info("Tokenizer loaded successfully")
|
26 |
+
|
27 |
+
logger.info("Loading model...")
|
28 |
model = AutoModelForCausalLM.from_pretrained(
|
29 |
MODEL,
|
30 |
torch_dtype=torch.bfloat16,
|
|
|
32 |
trust_remote_code=True,
|
33 |
ignore_mismatched_sizes=True
|
34 |
)
|
35 |
+
logger.info("Model loaded successfully")
|
36 |
|
37 |
# Gateway intents: message_content is a privileged intent required for the bot
# to read message text (presumably also enabled in the developer portal — verify).
intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix="!", intents=intents)
|
40 |
|
41 |
async def generate_response(message, history, system_prompt):
|
42 |
+
logger.info(f"Generating response for message: {message[:50]}...") # Log first 50 chars of message
|
43 |
conversation = [{"role": "system", "content": system_prompt}]
|
44 |
for prompt, answer in history:
|
45 |
conversation.extend([
|
|
|
68 |
)
|
69 |
|
70 |
response = tokenizer.decode(output[0], skip_special_tokens=True)
|
71 |
+
logger.info(f"Generated response: {response[:100]}...") # Log first 100 chars of response
|
72 |
return response.split("Assistant:")[-1].strip()
|
73 |
|
74 |
@bot.event
async def on_ready():
    # Fired once the gateway connection is established; purely informational.
    logger.info(f"{bot.user} has connected to Discord!")
|
77 |
|
78 |
@bot.event
async def on_message(message):
    """Answer messages in the configured channel with a model-generated reply.

    Each message is answered statelessly (empty history); long replies are
    split to fit Discord's 2000-character message limit.
    """
    # Ignore the bot's own messages to avoid reply loops.
    if message.author == bot.user:
        return

    logger.info(f"Received message: {message.content[:50]}...")  # Log first 50 chars of message
    logger.info(f"Message channel ID: {message.channel.id}")

    # Only respond in the single configured channel.
    if message.channel.id != DISCORD_CHANNEL_ID:
        logger.info("Message not in target channel")
        return

    # NOTE(review): overriding on_message on a commands.Bot without calling
    # bot.process_commands(message) means "!"-prefixed commands never dispatch
    # (discord.py FAQ) — confirm whether any commands are registered elsewhere.
    try:
        # History is always [] — no conversation memory across messages.
        response = await generate_response(message.content, [], "You are EXAONE model from LG AI Research, a helpful assistant.")

        # Discord rejects messages over 2000 characters; send in slices.
        # An empty response yields no chunks and nothing is sent.
        chunks = [response[i:i+2000] for i in range(0, len(response), 2000)]

        for i, chunk in enumerate(chunks):
            await message.channel.send(chunk)
            logger.info(f"Sent response chunk {i+1}/{len(chunks)}")
    except Exception as e:
        # Fix: logger.exception records the full traceback; the original
        # logger.error(f"...{e}") discarded it, hiding where failures occur.
        logger.exception(f"Error generating or sending response: {e}")
|
100 |
|
101 |
if __name__ == "__main__":
    import subprocess
    logger.info("Starting web.py...")
    # NOTE(review): the Popen handle is discarded — web.py is never waited on
    # or terminated by this script; confirm the host reaps it on shutdown.
    subprocess.Popen(["python", "web.py"])
    logger.info("Running Discord bot...")
    bot.run(DISCORD_TOKEN)  # blocks until the bot disconnects
|