# from transformers_stream_generator import init_stream_support
# init_stream_support()
try:
    import spaces
except ModuleNotFoundError:
    print('Cannot import the HF `spaces` package; using a no-op `spaces.GPU` stub.')
    class spaces:  # stub so the @spaces.GPU decorator below still resolves outside HF Spaces
        @staticmethod
        def GPU(func=None, **kwargs):
            return func if func is not None else (lambda f: f)
import os
import numpy as np
import argparse
import torch
import gradio as gr
from typing import Any, Iterator, List, Optional, Tuple
import filelock
import glob
import json
import time
from gradio.routes import Request
from gradio.utils import SyncToAsyncIterator, async_iteration
from gradio.helpers import special_args
import anyio
from typing import AsyncGenerator, Callable, Dict, Literal, Union, cast
from gradio_client.documentation import document, set_documentation_group
from tqdm.auto import tqdm
from huggingface_hub import snapshot_download
from gradio.components import Button
from gradio.events import Dependency, EventListenerMethod
from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizer
import types
import sys
from .base_engine import BaseEngine
from .transformers_engine import TransformersEngine, NewGenerationMixin
from ..configs import (
STREAM_CHECK_MULTIPLE,
STREAM_YIELD_MULTIPLE,
)
CODE_PATH = os.environ.get("CODE_PATH", "")
MODEL_PATH = os.environ.get("MODEL_PATH", "")
IMAGE_TOKEN = "[IMAGE]<|image|>[/IMAGE]"
IMAGE_LENGTH = 576  # visual tokens contributed by each image patch
MAX_PATCHES = 1     # patches per image
BLOCK_LANGS = str(os.environ.get("BLOCK_LANGS", ""))
BLOCK_LANGS = [x.strip() for x in BLOCK_LANGS.strip().split(";")] if len(BLOCK_LANGS.strip()) > 0 else []
LANG_BLOCK_HISTORY = bool(int(os.environ.get("LANG_BLOCK_HISTORY", "0")))
KEYWORDS = os.environ.get("KEYWORDS", "").strip()
KEYWORDS = KEYWORDS.split(";") if len(KEYWORDS) > 0 else []
KEYWORDS = [x.lower() for x in KEYWORDS]
LANG_BLOCK_MESSAGE = """Unsupported language."""
KEYWORD_BLOCK_MESSAGE = "Invalid request."
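# The safety filtering above is configured entirely through environment variables.
# Illustrative settings (example values, not defaults), exported before launching the app:
#
#   BLOCK_LANGS="zh;ru" KEYWORDS="foo;bar" LANG_BLOCK_HISTORY=1
#
# would block messages detected as "zh" or "ru", block any message containing
# "foo" or "bar" (case-insensitive), and keep blocking a conversation whose
# history already contains LANG_BLOCK_MESSAGE.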
def _detect_lang(text):
    # Detect the message language so that languages with potential safety risks can be blocked.
    from langdetect import detect as detect_lang
    dlang = None
    try:
        dlang = detect_lang(text)
    except Exception as e:
        if "No features in text." in str(e):
            # Empty or symbol-only input: treat as English (not blocked).
            return "en"
        else:
            # Any other detection failure: fall back to "zh", which is blocked
            # whenever "zh" appears in BLOCK_LANGS.
            return "zh"
    return dlang
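# Behaviour sketch for _detect_lang (assuming `langdetect` is installed):
#   _detect_lang("Hello, how are you?")  -> "en"
#   _detect_lang("")                     -> "en"  (langdetect raises "No features in text.")
#   any other detection error            -> "zh"  (conservative fallback, see above)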
def block_lang(
    message: str,
    history: Optional[List[Tuple[str, str]]] = None,
) -> bool:
    # With history-based blocking enabled, keep blocking a conversation once any
    # previous turn has already been replaced by the block message.
    if len(BLOCK_LANGS) == 0:
        return False
    if LANG_BLOCK_HISTORY and history is not None and any((LANG_BLOCK_MESSAGE in x[1].strip()) for x in history):
        return True
    else:
        _lang = _detect_lang(message)
        if _lang in BLOCK_LANGS:
            # print(f'Detect blocked {_lang}: {message}')
            return True
        else:
            return False
def safety_check(text, history=None) -> Optional[str]:
    """
    Despite our efforts in safety tuning and red teaming, our models may still generate harmful or illegal content.
    This provides an additional security measure to enhance safety and compliance with local regulations.
    """
if len(KEYWORDS) > 0 and any(x in text.lower() for x in KEYWORDS):
return KEYWORD_BLOCK_MESSAGE
if len(BLOCK_LANGS) > 0:
if block_lang(text, history):
return LANG_BLOCK_MESSAGE
return None
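# Usage sketch (illustrative, assuming KEYWORDS and BLOCK_LANGS are configured):
#   safety_check("text containing a blocked keyword")    -> KEYWORD_BLOCK_MESSAGE
#   safety_check("text detected as a blocked language")  -> LANG_BLOCK_MESSAGE
#   safety_check("ordinary allowed text")                -> None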
def safety_check_conversation_string(text, delimiter=None) -> Optional[str]:
if len(KEYWORDS) > 0 and any(x in text.lower() for x in KEYWORDS):
return KEYWORD_BLOCK_MESSAGE
if len(BLOCK_LANGS) > 0:
import re
delimiter = delimiter or (r"</s><\|im_start\|>user\n", r"</s><\|im_start\|>assistant\n", r"<\|im_start\|>system\n")
turns = re.split(r"|".join(delimiter), text)
turns = [t for t in turns if t.strip() != '']
for t in turns:
if block_lang(t):
return LANG_BLOCK_MESSAGE
return None
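# The default delimiters above assume a ChatML-style serialized conversation,
# roughly of this shape (sketch):
#   "<|im_start|>system\n...</s><|im_start|>user\n...</s><|im_start|>assistant\n..."
# Each extracted turn is then language-checked independently.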
def is_check_safety():
return len(KEYWORDS) > 0 or len(BLOCK_LANGS) > 0
def safety_check_conversation(conversation) -> Optional[str]:
    """
    Despite our efforts in safety tuning and red teaming, our models may still generate harmful or illegal content.
    This provides an additional security measure to enhance safety and compliance with local regulations.
    """
texts = [c['content'] for c in conversation]
for text in texts:
if len(KEYWORDS) > 0 and any(x in text.lower() for x in KEYWORDS):
return KEYWORD_BLOCK_MESSAGE
if len(BLOCK_LANGS) > 0:
if block_lang(text):
return LANG_BLOCK_MESSAGE
return None
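# Expected `conversation` shape (only the "content" key is read above; the
# "role" key is shown purely for illustration):
#   [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]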
class SeaLMMMv0Engine(TransformersEngine):
@property
def image_token(self):
return IMAGE_TOKEN
@property
def max_position_embeddings(self) -> int:
return self._model.config.max_position_embeddings
@property
def tokenizer(self):
return self._tokenizer
@property
def processor(self):
return self._processor
def load_model(self):
from transformers import AutoProcessor
import sys
# caution: path[0] is reserved for script path (or '' in REPL)
# sys.path.append(CODE_PATH)
# from examples.llm.src.models.sealmm.modeling_sealmm import (
# SeaLMMForCausalLM
# )
from .modeling_sealmm import (SeaLMMForCausalLM, )
model_path = MODEL_PATH
        print(f'Loading model from model_path={model_path}')
        # FSDP training saves weights as pytorch_model_fsdp.bin; link it to the
        # filename from_pretrained expects if the standard name is missing.
        if os.path.exists(f"{model_path}/pytorch_model_fsdp.bin") and not os.path.exists(f"{model_path}/pytorch_model.bin"):
            os.symlink("pytorch_model_fsdp.bin", f"{model_path}/pytorch_model.bin")
self._processor = AutoProcessor.from_pretrained(model_path)
self._model = SeaLMMForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map="cuda").eval()
        # Monkey-patch GenerationMixin.sample with the streaming variant so that
        # .generate() yields tokens one at a time (keep the original for reference).
        self._model.sample_old = self._model.sample
        self._model.sample = types.MethodType(NewGenerationMixin.sample_stream, self._model)
self._tokenizer = self._processor.tokenizer
print(self._model)
print(f"{self.max_position_embeddings=}")
    def get_multimodal_tokens(self, full_prompt, image_paths=None):
        # Text tokens plus a fixed per-image budget of IMAGE_LENGTH * MAX_PATCHES tokens.
        image_paths = image_paths or []
        num_tokens = len(self.tokenizer.encode(full_prompt))
        for image_path in image_paths:
            num_tokens += IMAGE_LENGTH * MAX_PATCHES
        return num_tokens
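    # Worked example (illustrative numbers): a 50-token prompt with 2 images counts
    # 50 + 2 * IMAGE_LENGTH * MAX_PATCHES = 50 + 2 * 576 * 1 = 1202 tokens.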
def maybe_raise_safety(self, message, gen_index=-1):
if is_check_safety():
if gen_index < 0:
message_safety = safety_check_conversation_string(message)
if message_safety is not None:
raise gr.Error(message_safety)
else:
if STREAM_CHECK_MULTIPLE > 0 and gen_index % STREAM_CHECK_MULTIPLE == 0:
message_safety = safety_check_conversation_string(message)
if message_safety is not None:
raise gr.Error(message_safety)
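    # Checking cadence sketch: with gen_index < 0 the text is checked once (used for
    # the prompt); during streaming, with e.g. STREAM_CHECK_MULTIPLE = 4 (illustrative),
    # the partial response is re-checked on decode steps 0, 4, 8, ... to bound the cost.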
@spaces.GPU
def generate_yield_string(self, prompt, temperature, max_tokens, stop_strings: Optional[Tuple[str]] = None, **kwargs):
from transformers.generation.utils import GenerationConfig
from PIL import Image
image_paths = kwargs.get("image_paths", None)
image_paths = image_paths or []
images = [Image.open(x) for x in image_paths] if len(image_paths) > 0 else None
        # transformers 4.38 exposes GenerationMixin.sample; 4.39 renamed it to ._sample,
        # so the streaming patch below targets .sample (re-applied defensively before each call).
        # Note: @spaces.GPU also needs to wrap the Gradio callback that invokes this method.
self._model.sample = types.MethodType(NewGenerationMixin.sample_stream, self._model)
with torch.no_grad():
inputs = self.processor(prompt, images, return_tensors='pt')
# inputs = {k: v.to("cuda", torch.bfloat16) for k, v in inputs.items() if v is not None}
# model.device
inputs = {k: v.to(self._model.device) for k, v in inputs.items() if v is not None}
num_tokens = self.get_multimodal_tokens(prompt, image_paths)
# non-streaming generation
# output = self._model.generate(
# **inputs,
# do_sample=True,
# temperature=temperature,
# max_new_tokens=max_tokens,
# pad_token_id=self.processor.tokenizer.pad_token_id,
# )
# # response = self.processor.tokenizer.decode(output[0][-inputs.input_ids.size(-1):], skip_special_tokens=True)
# full_output_text = self.processor.decode(output[0], skip_special_tokens=True)
# response = full_output_text.split("<|im_start|>assistant\n")[-1]
# num_tokens = self.get_multimodal_tokens(prompt + response, image_paths)
# print(prompt)
# print(response)
# print(num_tokens)
# yield response, num_tokens
# if i % 4 == 0 and i > 1:
# message_safety = safety_check(response)
# if message_safety is not None:
# history = undo_history(history)
# yield history, "", None
# raise gr.Error(message_safety)
            # Check the prompt once before starting generation.
            self.maybe_raise_safety(prompt)
            # ! Streaming generation: the patched .sample turns .generate() into a token generator.
generator = self._model.generate(
**inputs,
do_sample=True,
temperature=temperature,
max_new_tokens=max_tokens,
pad_token_id=self.processor.tokenizer.pad_token_id,
)
out_tokens = []
response = None
for index, token in enumerate(generator):
out_tokens.append(token.item())
response = self.processor.tokenizer.decode(out_tokens)
self.maybe_raise_safety(response, gen_index=index)
yield response, num_tokens
del generator
        if response is not None:
            # Final safety pass over the complete generated response.
            self.maybe_raise_safety(response)
full_text = prompt + response
num_tokens = self.get_multimodal_tokens(full_text, image_paths)
yield response, num_tokens
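    # Usage sketch for the streaming API (assumptions: MODEL_PATH points at a local
    # SeaLMM checkpoint, a CUDA device is available, the engine constructor needs no
    # extra arguments, "example.jpg" is a placeholder image path, and the ChatML-style
    # prompt format is inferred from the delimiters used above):
    #
    #   engine = SeaLMMMv0Engine()
    #   engine.load_model()
    #   prompt = f"<|im_start|>user\n{IMAGE_TOKEN}\nDescribe this image.</s><|im_start|>assistant\n"
    #   for partial_response, num_tokens in engine.generate_yield_string(
    #       prompt, temperature=0.7, max_tokens=256, image_paths=["example.jpg"],
    #   ):
    #       print(partial_response)   # grows as tokens stream in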