Create app.py
app.py ADDED
@@ -0,0 +1,73 @@
import torch
import gradio as gr
from transformers import (
    AutoTokenizer,
    AutoModelForMaskedLM,
    AutoModelForSeq2SeqLM,
    AutoModelForCausalLM,
)

# The three "MAGI" units, each backed by a different Japanese language model.
BERTTokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
BERTModel = AutoModelForMaskedLM.from_pretrained("cl-tohoku/bert-base-japanese")

mT5Tokenizer = AutoTokenizer.from_pretrained("google/mt5-base")
mT5Model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-base")

GPT2Tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt2-medium")
GPT2Model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt2-medium")

# Running tally of votes: +1 for 承認 (approve), -1 for 否定 (deny).
votes = []


def MELCHIOR(sue):
    # BERT: fill the [MASK] slot in a role-play prompt and compare the logits
    # of 承認 (approve) vs 否定 (deny) at that position.
    # Index [1] skips BERT's leading [CLS] token.
    allow = BERTTokenizer("承認").input_ids[1]
    deny = BERTTokenizer("否定").input_ids[1]
    # Prompt: "MELCHIOR, with the personality of a scientist, answers the following
    # resolution. Human: '<proposal> Approve or deny?' MELCHIOR: '[MASK]'"
    output = BERTModel(**BERTTokenizer(
        '科学者としての人格を持ったMELCHIORは次の決議に答えます。人間「' + sue
        + '承認か否定どちらですか?」' + "MELCHIOR 「[MASK]」",
        return_tensors="pt")).logits
    BERTTokenizer.batch_decode(torch.argmax(output, -1))  # debug decode; result unused
    # Logits at the [MASK] position (expected to be the third token from the end: [MASK] 」 [SEP]).
    mask = output[0, -3, :]
    votes.append(1 if mask[allow] > mask[deny] else -1)
    return "承認" if mask[allow] > mask[deny] else "否定"


def BALTHASAR(sue):
    # mT5: encode a prompt ending in a "<X>" placeholder, run the decoder for two
    # steps, and compare the 承認 / 否定 logits at the second step.
    # Note: "<X>" is a literal string, not mT5's <extra_id_0> sentinel, and the
    # encoder states are fed back through inputs_embeds; both are quirks of this
    # demo rather than standard mT5 usage. Index [1] mirrors the BERT case, but
    # the sentencepiece tokenizer adds no leading special token, so it may not
    # land exactly on 承認 / 否定.
    allow = mT5Tokenizer("承認").input_ids[1]
    deny = mT5Tokenizer("否定").input_ids[1]
    # Prompt: "BALTHASAR, with the personality of a mother, answers the following
    # resolution. Human: '<proposal> Approve or deny?' BALTHASAR: '<X>'"
    encoder_output = mT5Model.encoder(**mT5Tokenizer(
        '母としての人格を持ったBALTHASARは次の決議に答えます。人間「' + sue
        + '承認か否定どちらですか?」' + "BALTHASAR 「<X>」",
        return_tensors="pt"))
    i = 0
    txt = "<pad>"
    probs = mT5Model(inputs_embeds=encoder_output.last_hidden_state,
                     decoder_input_ids=mT5Tokenizer(txt, return_tensors="pt").input_ids).logits[0]
    id = torch.argmax(probs[i + 1])
    txt = txt + "<X>"
    i = i + 1
    probs = mT5Model(inputs_embeds=encoder_output.last_hidden_state,
                     decoder_input_ids=mT5Tokenizer(txt, return_tensors="pt").input_ids).logits[0]
    id = torch.argmax(probs[i + 1])
    txt = txt + mT5Tokenizer.decode(id)
    votes.append(1 if probs[i + 1][allow] > probs[i + 1][deny] else -1)
    return "承認" if probs[i + 1][allow] > probs[i + 1][deny] else "否定"


def CASPER(sue):
    # GPT-2: the prompt ends with an open quote, so the logits at the final
    # position give the model's next-token preference; compare 承認 vs 否定 there.
    # Same caveat as BALTHASAR about index [1] with a sentencepiece tokenizer.
    allow = GPT2Tokenizer("承認").input_ids[1]
    deny = GPT2Tokenizer("否定").input_ids[1]
    # Prompt: "CASPER, with the personality of a woman, answers the following
    # resolution. Human: '<proposal> Approve or deny?' CASPER: '"
    probs = GPT2Model(**GPT2Tokenizer(
        '女としての人格を持ったCASPERは次の決議に答えます。人間「' + sue
        + '承認か否定どちらですか?」' + "CASPER 「",
        return_tensors="pt")).logits[0]
    next_token = probs[-1]
    votes.append(1 if next_token[allow] > next_token[deny] else -1)
    return "承認" if next_token[allow] > next_token[deny] else "否定"


def greet(sue):
    # Collect the three verdicts, then report the majority decision:
    # 可決 (passed) if the last three votes sum to > 0, else 否決 (rejected).
    text1 = "BERT-1" + MELCHIOR(sue)
    text2 = "GPT-2" + CASPER(sue)
    text3 = "mT5-3" + BALTHASAR(sue)
    return (text1 + " " + text2 + " " + text3 + "\n______\n\n"
            + ("|可決|" if sum(votes[-3:]) > 0 else "|否決|") + "\n ̄ ̄ ̄")


# MAGI-style theme: black background, orange text, Mincho font.
css = ".gradio-container {background-color: black} .gr-button {background-color: blue;color:black; weight:200%;font-family:YuMincho}.block{color:orange;} .gr-box {text-align: center;font-size: 125%;border-color:orange;background-color: #000000;weight:200%;font-family:YuMincho}"

with gr.Blocks(css=css) as demo:
    # Input placeholder: "Enter the resolution here and press 提訴 (file a motion)."
    sue = gr.Textbox(label="NAGI System", placeholder="ここに決議内容を入力し,提訴を押してください.")
    greet_btn = gr.Button("提訴")      # "File a motion"
    output = gr.Textbox(label="決議")  # "Resolution"
    greet_btn.click(fn=greet, inputs=sue, outputs=output)

demo.launch()
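
For reference, the pass/fail rule in greet() reduces to a three-way majority vote. A minimal standalone sketch (with hypothetical vote values, not output from the models above):

# Each MAGI function appends +1 for 承認 (approve) or -1 for 否定 (deny);
# the motion passes when the last three votes sum to a positive number.
votes = [1, -1, 1]  # e.g. MELCHIOR approves, CASPER denies, BALTHASAR approves
verdict = "可決" if sum(votes[-3:]) > 0 else "否決"  # 可決 = passed, 否決 = rejected
print(verdict)  # prints 可決: two approvals outvote one denial

Running the Space also needs the model dependencies installed, typically via requirements.txt: transformers, torch, and gradio, plus sentencepiece for the mT5 and rinna GPT-2 tokenizers and fugashi/ipadic for the Japanese BERT tokenizer.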