lanzhiwang committed on
Commit
e90feb1
1 Parent(s): d5bdfe9
Files changed (4)
  1. 01.ipynb +0 -0
  2. app-bak.py +0 -172
  3. app.py +8 -15
  4. requirements.txt +4 -5
01.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
app-bak.py DELETED
@@ -1,172 +0,0 @@
- import streamlit as st
- import os
- import io
- from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
- import time
- import json
- from typing import List
- import torch
- import random
- import logging
-
- if torch.cuda.is_available():
-     device = torch.device("cuda:0")
- else:
-     device = torch.device("cpu")
-     logging.warning("GPU not found, using CPU, translation will be very slow.")
-
- st.cache(suppress_st_warning=True, allow_output_mutation=True)
- st.set_page_config(page_title="M2M100 Translator")
-
- lang_id = {
-     "Afrikaans": "af",
-     "Amharic": "am",
-     "Arabic": "ar",
-     "Asturian": "ast",
-     "Azerbaijani": "az",
-     "Bashkir": "ba",
-     "Belarusian": "be",
-     "Bulgarian": "bg",
-     "Bengali": "bn",
-     "Breton": "br",
-     "Bosnian": "bs",
-     "Catalan": "ca",
-     "Cebuano": "ceb",
-     "Czech": "cs",
-     "Welsh": "cy",
-     "Danish": "da",
-     "German": "de",
-     "Greek": "el",
-     "English": "en",
-     "Spanish": "es",
-     "Estonian": "et",
-     "Persian": "fa",
-     "Fulah": "ff",
-     "Finnish": "fi",
-     "French": "fr",
-     "Western Frisian": "fy",
-     "Irish": "ga",
-     "Gaelic": "gd",
-     "Galician": "gl",
-     "Gujarati": "gu",
-     "Hausa": "ha",
-     "Hebrew": "he",
-     "Hindi": "hi",
-     "Croatian": "hr",
-     "Haitian": "ht",
-     "Hungarian": "hu",
-     "Armenian": "hy",
-     "Indonesian": "id",
-     "Igbo": "ig",
-     "Iloko": "ilo",
-     "Icelandic": "is",
-     "Italian": "it",
-     "Japanese": "ja",
-     "Javanese": "jv",
-     "Georgian": "ka",
-     "Kazakh": "kk",
-     "Central Khmer": "km",
-     "Kannada": "kn",
-     "Korean": "ko",
-     "Luxembourgish": "lb",
-     "Ganda": "lg",
-     "Lingala": "ln",
-     "Lao": "lo",
-     "Lithuanian": "lt",
-     "Latvian": "lv",
-     "Malagasy": "mg",
-     "Macedonian": "mk",
-     "Malayalam": "ml",
-     "Mongolian": "mn",
-     "Marathi": "mr",
-     "Malay": "ms",
-     "Burmese": "my",
-     "Nepali": "ne",
-     "Dutch": "nl",
-     "Norwegian": "no",
-     "Northern Sotho": "ns",
-     "Occitan": "oc",
-     "Oriya": "or",
-     "Panjabi": "pa",
-     "Polish": "pl",
-     "Pushto": "ps",
-     "Portuguese": "pt",
-     "Romanian": "ro",
-     "Russian": "ru",
-     "Sindhi": "sd",
-     "Sinhala": "si",
-     "Slovak": "sk",
-     "Slovenian": "sl",
-     "Somali": "so",
-     "Albanian": "sq",
-     "Serbian": "sr",
-     "Swati": "ss",
-     "Sundanese": "su",
-     "Swedish": "sv",
-     "Swahili": "sw",
-     "Tamil": "ta",
-     "Thai": "th",
-     "Tagalog": "tl",
-     "Tswana": "tn",
-     "Turkish": "tr",
-     "Ukrainian": "uk",
-     "Urdu": "ur",
-     "Uzbek": "uz",
-     "Vietnamese": "vi",
-     "Wolof": "wo",
-     "Xhosa": "xh",
-     "Yiddish": "yi",
-     "Yoruba": "yo",
-     "Chinese": "zh",
-     "Zulu": "zu",
- }
-
-
- @st.cache(suppress_st_warning=True, allow_output_mutation=True)
- def load_model(
-     pretrained_model: str = "facebook/m2m100_1.2B",
-     cache_dir: str = "models/",
- ):
-     tokenizer = M2M100Tokenizer.from_pretrained(pretrained_model, cache_dir=cache_dir)
-     model = M2M100ForConditionalGeneration.from_pretrained(
-         pretrained_model, cache_dir=cache_dir
-     ).to(device)
-     model.eval()
-     return tokenizer, model
-
-
- st.title("M2M100 Translator")
- st.write("M2M100 is a multilingual encoder-decoder (seq-to-seq) model trained for Many-to-Many multilingual translation. It was introduced in the paper https://arxiv.org/abs/2010.11125 and first released in the repository https://github.com/pytorch/fairseq/tree/master/examples/m2m_100. The model can translate directly between any of the 9,900 translation directions of its 100 languages (100 × 99 directed pairs).\n")
-
- st.write("This demo uses the facebook/m2m100_1.2B model. For local inference see https://github.com/ikergarcia1996/Easy-Translate")
-
-
- user_input: str = st.text_area(
-     "Input text",
-     height=200,
-     max_chars=5120,
- )
-
- source_lang = st.selectbox(label="Source language", options=list(lang_id.keys()))
- target_lang = st.selectbox(label="Target language", options=list(lang_id.keys()))
-
- if st.button("Run"):
-     time_start = time.time()
-     tokenizer, model = load_model()
-
-     src_lang = lang_id[source_lang]
-     trg_lang = lang_id[target_lang]
-     tokenizer.src_lang = src_lang
-     with torch.no_grad():
-         encoded_input = tokenizer(user_input, return_tensors="pt").to(device)
-         generated_tokens = model.generate(
-             **encoded_input, forced_bos_token_id=tokenizer.get_lang_id(trg_lang)
-         )
-         translated_text = tokenizer.batch_decode(
-             generated_tokens, skip_special_tokens=True
-         )[0]
-
-     time_end = time.time()
-     st.success(translated_text)
-
-     st.write(f"Computation time: {round((time_end-time_start),3)} seconds")
app.py CHANGED
@@ -2,13 +2,13 @@ import streamlit as st
from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
import time
import torch
- import logging
+
 
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
-     logging.warning("GPU not found, using CPU, translation will be very slow.")
+
 
st.set_page_config(page_title="M2M100 Translator")
 
@@ -125,25 +125,18 @@ def load_model(
    model = M2M100ForConditionalGeneration.from_pretrained(
        pretrained_model, cache_dir=cache_dir
    ).to(device)
-     """
-     In PyTorch, `model.eval()` puts the model into evaluation mode. In deep learning, a model can behave differently in the training and evaluation phases. The main effects of `model.eval()` are:
-
-     1. **Batch Normalization and Dropout:**
-        - During training, layers such as `Batch Normalization` and `Dropout` behave differently: `Batch Normalization` normalizes inputs with per-batch statistics, and `Dropout` randomly zeroes some neurons. During evaluation we instead want to normalize with the running statistics accumulated during training rather than per-batch statistics, and we no longer want neurons dropped at random. Calling `model.eval()` switches these layers to their evaluation behavior, ensuring they act correctly at evaluation time.
-
-     2. **Gradient computation:**
-        - During evaluation only the forward pass matters, so inference is wrapped in `torch.no_grad()`, which stops autograd from building a computation graph and saves time and memory. (Strictly, it is `torch.no_grad()`, not `model.eval()`, that disables gradient tracking.)
-
-     Overall, `model.eval()` ensures the model behaves correctly at evaluation time and makes evaluation more efficient.
-     """
    model.eval()
    return tokenizer, model
 
 
st.title("M2M100 Translator")
- st.write("M2M100 is a multilingual encoder-decoder (seq-to-seq) model trained for Many-to-Many multilingual translation. It was introduced in the paper https://arxiv.org/abs/2010.11125 and first released in the repository https://github.com/pytorch/fairseq/tree/master/examples/m2m_100. The model can translate directly between any of the 9,900 translation directions of its 100 languages (100 × 99 directed pairs).\n")
+ st.write(
+     "M2M100 is a multilingual encoder-decoder (seq-to-seq) model trained for Many-to-Many multilingual translation. It was introduced in the paper https://arxiv.org/abs/2010.11125 and first released in the repository https://github.com/pytorch/fairseq/tree/master/examples/m2m_100. The model can translate directly between any of the 9,900 translation directions of its 100 languages (100 × 99 directed pairs).\n"
+ )
 
- st.write("This demo uses the facebook/m2m100_1.2B model. For local inference see https://github.com/ikergarcia1996/Easy-Translate")
+ st.write(
+     "This demo uses the facebook/m2m100_1.2B model. For local inference see https://github.com/ikergarcia1996/Easy-Translate"
+ )
 
 
user_input: str = st.text_area(
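
The docstring removed in the hunk above explains what `model.eval()` does. A minimal sketch of the behavior it describes, using a standalone `nn.Dropout` layer for illustration:

import torch
import torch.nn as nn

layer = nn.Dropout(p=0.5)
x = torch.ones(8, requires_grad=True)

layer.train()   # training mode: about half the activations are zeroed, the rest scaled by 1/(1-p) = 2
print(layer(x))

layer.eval()    # evaluation mode: dropout is a no-op, so the output equals the input
print(layer(x))

print(layer(x).requires_grad)      # True: eval() alone does not disable autograd
with torch.no_grad():              # no_grad() is what stops gradient tracking
    print(layer(x).requires_grad)  # False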
requirements.txt CHANGED
@@ -1,5 +1,4 @@
- streamlit
- torch
- transformers
- sentencepiece
- # transformers[sentencepiece]
+ streamlit==1.29.0
+ torch==2.1.1
+ transformers==4.35.2
+ sentencepiece==0.1.99