# Load model directly
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

from flask import Flask, request

app = Flask(__name__)

app.config.from_pyfile('settings.py')
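
# settings.py (loaded above) is assumed to define the HuggingFace access token,
# e.g. a single line such as the hypothetical example below; keep the real
# token out of version control:
#   HUGGINGFACE_TOKEN = "hf_..."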

@app.route('/')
def index():
    return "ISA Project Flask Server"


@app.post('/translate')
def translate():
    article_en = request.form['original_text']
    translate_code = request.form['translate_code']
    access_token = app.config["HUGGINGFACE_TOKEN"]
    # Use the token from settings.py rather than a hard-coded secret.
    # Note: the tokenizer and model are reloaded on every request; caching them
    # at module level would avoid repeated initialisation.
    tokenizer = AutoTokenizer.from_pretrained("SnypzZz/Llama2-13b-Language-translate", token=access_token)
    model = AutoModelForSeq2SeqLM.from_pretrained("SnypzZz/Llama2-13b-Language-translate", token=access_token)

    model_inputs = tokenizer(article_en, return_tensors="pt")

    # translate from English
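    # forced_bos_token_id forces the first generated token to be the target-
    # language code, which is how this mBART-style seq2seq checkpoint selects
    # the output language (assumes translate_code is one of the codes listed below).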
    generated_tokens = model.generate(
        **model_inputs,
        forced_bos_token_id=tokenizer.lang_code_to_id[translate_code]
    )
    translated_sentence = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)

    # Supported target language codes:
    # English (en_XX), Spanish (es_XX), French (fr_XX), Japanese (ja_XX),
    # Korean (ko_KR), Russian (ru_RU), Vietnamese (vi_VN), Chinese (zh_CN),
    # Mongolian (mn_MN), Urdu (ur_PK)
    return translated_sentence[0]

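
# Example client call once the server is running (a sketch, assuming the
# default Flask dev-server address http://127.0.0.1:5000):
#
#   curl -X POST http://127.0.0.1:5000/translate \
#        -F "original_text=Hello, how are you?" \
#        -F "translate_code=fr_XX"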

if __name__ == "__main__":
    app.run()