Spaces: Sleeping

lanzhiwang committed
Commit 19600fe
Parent(s): 3c3d1c4
first commit

Browse files:
- Dockerfile +30 -0
- app-download-model.py +41 -0
- app.py +0 -9
- learn/app1.py +9 -0
- learn/app2.ipynb +0 -0
- learn/app3.ipynb +0 -0
Dockerfile
ADDED
@@ -0,0 +1,30 @@
FROM docker.io/library/python:3.10

RUN apt-get update && apt-get install -y git git-lfs ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx && rm -rf /var/lib/apt/lists/* && git lfs install

RUN pip install --no-cache-dir pip==22.3.1 && pip install --no-cache-dir datasets "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1" "pydantic~=1.0" gradio[oauth]==3.44.1 uvicorn>=0.14.0 spaces==0.18.0

WORKDIR /home/user/app

RUN useradd -m -u 1000 user

COPY --chown=1000 ./ /home/user/app

RUN if [ -f ./packages.txt ]; then \
        apt-get update && xargs -r -a ./packages.txt apt-get install -y && rm -rf /var/lib/apt/lists/*; \
    else \
        echo "packages.txt not found!"; \
    fi

RUN if [ -f ./requirements.txt ]; then \
        pip install --no-cache-dir -r ./requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple; \
    else \
        echo "requirements.txt not found!"; \
    fi

ENV PYTHONUNBUFFERED=1

EXPOSE 7860

# ENTRYPOINT ["python", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]

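As a quick sanity check of this image, a minimal sketch is shown below; it assumes the image has already been built and started with port 7860 published (e.g. docker run -p 7860:7860 ...), which is not part of this commit:

# Minimal smoke test, assuming the container is running with port 7860
# published on localhost; the URL is an assumption, not part of the commit.
import urllib.request

resp = urllib.request.urlopen("http://localhost:7860", timeout=10)
print("status:", resp.status)  # expect 200 once the Gradio app is up
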
app-download-model.py
ADDED
@@ -0,0 +1,41 @@
import os
import subprocess
import gradio as gr
from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForCausalLM

# Download the model
base_dir = "/root/.cache/huggingface/hub"
if not os.path.isdir(base_dir):
    os.makedirs(base_dir)

cmd_list = ["cd", base_dir, "&&", "git lfs install", "&&", "git clone", "https://gitee.com/lanzhiwang/gpt2.git", "models"]
cmd_str = " ".join(cmd_list)
print("cmd_str:", cmd_str)
ret, out = subprocess.getstatusoutput(cmd_str)
print("ret:", ret)
print("out:", out)

tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="/root/.cache/huggingface/hub/models")
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path="/root/.cache/huggingface/hub/models")
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)

# generator = pipeline('text-generation', model='gpt2')

def generate(text):
    result = generator(text, max_length=30, num_return_sequences=1)
    return result[0]["generated_text"]

examples = [
    ["The Moon's orbit around Earth has"],
    ["The smooth Borealis basin in the Northern Hemisphere covers 40%"],
]

demo = gr.Interface(
    fn=generate,
    inputs=gr.inputs.Textbox(lines=5, label="Input Text"),
    outputs=gr.outputs.Textbox(label="Generated Text"),
    examples=examples
)

demo.launch(server_name="0.0.0.0", server_port=7860)

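One note on the clone step above: joining cmd_list into a single string and passing it to subprocess.getstatusoutput leans on shell parsing of the cd/&& chain, and a non-zero ret is only printed, not acted on. A minimal alternative sketch, same repository URL and target directory, structured as it might look without the shell (this rewrite is an assumption, not part of the commit):

# Sketch: run git via argument lists with cwd= instead of one shell string.
import subprocess

base_dir = "/root/.cache/huggingface/hub"
subprocess.run(["git", "lfs", "install"], cwd=base_dir, check=True)
subprocess.run(
    ["git", "clone", "https://gitee.com/lanzhiwang/gpt2.git", "models"],
    cwd=base_dir,
    check=True,  # raise CalledProcessError if the clone fails
)
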
app.py
CHANGED
@@ -1,22 +1,13 @@
-import os
-import subprocess
-import logging
-
 import gradio as gr
 from transformers import pipeline
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Download the model
-
-
-
 # Load model directly
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 tokenizer = AutoTokenizer.from_pretrained("gpt2")
 model = AutoModelForCausalLM.from_pretrained("gpt2")
 
-
 generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
 
 # generator = pipeline('text-generation', model='gpt2')

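The net effect of this change is that app.py drops the Gitee clone and loads gpt2 by name instead. A minimal sketch of exercising the same pipeline outside Gradio, reusing one of the example prompts from app-download-model.py:

# Sketch: the pipeline app.py builds, driven from a plain script.
from transformers import pipeline

generator = pipeline('text-generation', model='gpt2')
result = generator("The Moon's orbit around Earth has", max_length=30, num_return_sequences=1)
print(result[0]["generated_text"])
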
learn/app1.py
ADDED
@@ -0,0 +1,9 @@
import gradio as gr

def greet(name):
    return "Hello " + name + "!"

demo = gr.Interface(fn=greet, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)

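With learn/app1.py running, the demo can also be called programmatically. A hedged sketch using gradio_client follows; api_name="/predict" is Gradio's default route for a single-function Interface, an assumption rather than something pinned by this commit:

# Sketch: query the running greet demo over HTTP via gradio_client.
from gradio_client import Client

client = Client("http://localhost:7860")
result = client.predict("World", api_name="/predict")
print(result)  # expected: "Hello World!"
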
learn/app2.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
learn/app3.ipynb
ADDED
The diff for this file is too large to render.
See raw diff