# ------------------- LIBRARIES -------------------- #
import logging
import os

import streamlit as st
import torch
from transformers import (
    AutoTokenizer, AutoModelForCausalLM)
st.balloons()
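
# Assumed dependencies (not pinned in the original source):
# `streamlit`, `torch`, and `transformers`, e.g.
#   pip install streamlit torch transformers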

# --------------------- HELPER --------------------- #
def C(text, color="yellow"):
    """Wrap `text` in ANSI escape codes for bold terminal colors."""
    color_dict: dict = dict(
            red="\033[01;31m",
          green="\033[01;32m",
         yellow="\033[01;33m",
           blue="\033[01;34m",
        magenta="\033[01;35m",
           cyan="\033[01;36m",
    )
    color_dict[None] = "\033[0m"  # ANSI reset code
    return (
        # Fall back to the reset code for unknown color names;
        # a bare `None` default would render the literal "None".
        f"{color_dict.get(color, color_dict[None])}"
        f"{text}{color_dict[None]}")
st.balloons()
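
# Illustrative usage (assuming a terminal that honors ANSI codes):
#   print(C("ready", "green"))  # bold green "ready", then reset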

# ------------------ ENVIRONMENT ------------------- #
os.environ["HF_ENDPOINT"] = "https://huggingface.co"
# The root logger defaults to WARNING, so configure it or the
# INFO messages below are silently dropped.
logging.basicConfig(level=logging.INFO)
device = "cuda" if torch.cuda.is_available() else "cpu"
logging.info(C(f"[INFO] device = {device}"))
st.balloons()

# ------------------- INITIALIZE ------------------- #
# `st.cache` is deprecated; `st.cache_resource` is the modern
# Streamlit cache for unhashable resources such as models.
@st.cache_resource
def model_init():
    tokenizer = AutoTokenizer.from_pretrained(
        "ckip-joint/bloom-1b1-zh")
    model = AutoModelForCausalLM.from_pretrained(
        "ckip-joint/bloom-1b1-zh",
        # Ref.: Eric, Thanks!
        # torch_dtype="auto", 
        # device_map="auto",
    # Ref. for `half`: Chan-Jan, Thanks!
    ).eval().to(device)
    st.balloons()
    logging.info(C("[INFO] "f"Model init success!"))
    return tokenizer, model
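
# With `st.cache_resource`, the tokenizer and model load once per
# process and are reused across Streamlit reruns.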

# tokenizer, model = model_init()
# st.balloons()

# try:
#     # ===================== INPUT ====================== #
#     # prompt = "問：台灣最高的建築物是？答："  #@param {type:"string"}
#     # (English: "Q: What is the tallest building in Taiwan? A:")
#     prompt = st.text_input("Prompt: ")
#     st.balloons()
    

#     # =================== INFERENCE ==================== #
#     if prompt:
#         st.balloons()
#         with torch.no_grad():
#             [texts_out] = model.generate(
#                 **tokenizer(
#                     prompt, return_tensors="pt"
#                 ).to(device))
#         st.balloons()
#         output_text = tokenizer.decode(texts_out)
#         st.balloons()
#         st.markdown(output_text)
#         st.balloons()
# except Exception as err:
#     st.write(str(err))
#     st.snow()
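
# A hedged sketch of the same inference with explicit generation
# controls; `max_new_tokens` and `skip_special_tokens` are standard
# `transformers` arguments, and the value 64 is illustrative:
#
#     with torch.no_grad():
#         [texts_out] = model.generate(
#             **tokenizer(prompt, return_tensors="pt").to(device),
#             max_new_tokens=64)  # default generation length is short
#     output_text = tokenizer.decode(
#         texts_out, skip_special_tokens=True)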

st.snow()