Created site

- README.md +5 -5
- app.py +110 -0
- packages.txt +1 -0
- requirements.txt +7 -0
README.md
CHANGED
@@ -1,10 +1,10 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Syntax Tester
+emoji: π
+colorFrom: purple
+colorTo: gray
 sdk: streamlit
-sdk_version: 1.
+sdk_version: 1.29.0
 app_file: app.py
 pinned: false
 license: cc-by-4.0
app.py
ADDED
@@ -0,0 +1,110 @@
+import streamlit as st
+import base64
+from transformers import AutoModel, AutoTokenizer
+from graphviz import Digraph
+import json
+
+def display_tree(output):
+    size = str(int(len(output))) + ',5'
+    dpi = '300'
+    format = 'svg'
+    print(size, dpi)
+
+    # Initialize Digraph object
+    dot = Digraph(engine='dot', format=format)
+    dot.attr('graph', rankdir='LR', rank='same', size=size, dpi=dpi)
+
+    # Add nodes and edges
+    for i, word_info in enumerate(output):
+        word = word_info['word']  # Prepare word for RTL display
+        head_idx = word_info['dep_head_idx']
+        dep_func = word_info['dep_func']
+
+        dot.node(str(i), word)
+        # Create an invisible edge from the previous word to this one to enforce order
+        if i > 0:
+            dot.edge(str(i), str(i - 1), style='invis')
+        if head_idx != -1:
+            dot.edge(str(i), str(head_idx), label=dep_func, constraint='False')
+
+
+    # Render the Digraph object
+    dot.render('syntax_tree', format=format, cleanup=True)
+    # Display the image in a scrollable container
+    st.markdown(
+        f"""
+        <div style="height:250px; width:75vw; overflow:auto; border:1px solid #ccc; margin-left:-15vw">
+            <img src="data:image/svg+xml;base64,{base64.b64encode(dot.pipe(format='svg')).decode()}"
+                 style="display: block; margin: auto; max-height: 240px;">
+        </div>
+        """, unsafe_allow_html=True)
+
+    #st.image('syntax_tree.' + format, use_column_width=True)
+
+# Streamlit app title
+st.title('BERT Syntax Dependency Tree Visualizer')
+
+# Load Hugging Face token
+hf_token = st.secrets["HF_TOKEN"]  # Assuming you've set up the token in Streamlit secrets
+
+# Authenticate and load model
+tokenizer = AutoTokenizer.from_pretrained('dicta-il/dictabert-joint', use_auth_token=hf_token)
+model = AutoModel.from_pretrained('dicta-il/dictabert-joint', use_auth_token=hf_token, trust_remote_code=True)
+
+model.eval()
+
+# Checkbox for the compute_mst parameter
+compute_mst = st.checkbox('Compute Maximum Spanning Tree', value=True)
+
+output_style = st.selectbox(
+    'Output Style: ',
+    ('JSON', 'UD', 'IAHLT_UD'), index=1).lower()
+
+# User input
+sentence = st.text_input('Enter a sentence to analyze:')
+
+if sentence:
+    # Display the input sentence
+    st.text(sentence)
+
+    # Model prediction
+    output = model.predict([sentence], tokenizer, compute_syntax_mst=compute_mst, output_style=output_style)[0]
+
+    if output_style == 'ud' or output_style == 'iahlt_ud':
+        ud_output = output
+        # convert to tree format of [dict(word, dep_head_idx, dep_func)]
+        tree = []
+        for l in ud_output[2:]:
+            parts = l.split('\t')
+            if '-' in parts[0]: continue
+            tree.append(dict(word=parts[1], dep_head_idx=int(parts[6]) - 1, dep_func=parts[7]))
+        display_tree(tree)
+
+        # Construct the table as a Markdown string
+        table_md = "<div dir='rtl' style='text-align: right;'>\n\n"  # Start with RTL div
+
+        # Add the UD header lines
+        table_md += "##" + ud_output[0] + "\n"
+        table_md += "##" + ud_output[1] + "\n"
+        # Table header
+        table_md += "| " + " | ".join(["ID", "FORM", "LEMMA", "UPOS", "XPOS", "FEATS", "HEAD", "DEPREL", "DEPS", "MISC"]) + " |\n"
+        # Table alignment
+        table_md += "| " + " | ".join(["---"]*10) + " |\n"
+        for line in ud_output[2:]:
+            # Each UD line as a table row
+            cells = line.replace('_', '\\_').replace('|', '\\|').split('\t')
+            table_md += "| " + " | ".join(cells) + " |\n"
+        table_md += "</div>"  # Close the RTL div
+
+        # Display the table using a single markdown call
+        st.markdown(table_md, unsafe_allow_html=True)
+
+    else:
+        # display the tree
+        tree = [w['syntax'] for w in output['tokens']]
+        display_tree(tree)
+
+        # and the full json
+        st.markdown("```json\n" + json.dumps(output, ensure_ascii=False, indent=2) + "\n```")
+
+
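For reference only, and not part of the commit: a minimal sketch of the [dict(word, dep_head_idx, dep_func)] structure that display_tree expects, built from a couple of invented CoNLL-U rows in the same way app.py converts the UD output. HEAD is 1-based in CoNLL-U, so subtracting 1 turns the root's head into -1; the tokens below are hypothetical English placeholders, not actual dictabert-joint output.

# Hypothetical CoNLL-U rows (tab-separated: ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC).
conllu_rows = [
    "1\tThe\tthe\tDET\tDT\t_\t2\tdet\t_\t_",
    "2\tcat\tcat\tNOUN\tNN\t_\t3\tnsubj\t_\t_",
    "3\tsleeps\tsleep\tVERB\tVBZ\t_\t0\troot\t_\t_",
]

tree = []
for row in conllu_rows:
    parts = row.split('\t')
    if '-' in parts[0]:   # skip multi-word token ranges such as "1-2"
        continue
    tree.append(dict(word=parts[1],
                     dep_head_idx=int(parts[6]) - 1,  # 0-based head index; the root gets -1
                     dep_func=parts[7]))

# tree is now:
# [{'word': 'The', 'dep_head_idx': 1, 'dep_func': 'det'},
#  {'word': 'cat', 'dep_head_idx': 2, 'dep_func': 'nsubj'},
#  {'word': 'sleeps', 'dep_head_idx': -1, 'dep_func': 'root'}]
# display_tree(tree) would then draw three nodes with labeled dependency edges.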
packages.txt
ADDED
@@ -0,0 +1 @@
+graphviz
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+--extra-index-url https://download.pytorch.org/whl/cpu
+streamlit
+transformers
+torch
+graphviz
+arabic_reshaper
+python-bidi
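A note on the dependency split (my reading of the setup, not stated in the commit): on Hugging Face Spaces, packages.txt lists apt packages, so the graphviz entry there provides the system dot binary, while the graphviz entry in requirements.txt installs the Python bindings that app.py imports; the --extra-index-url line points pip at CPU-only torch wheels. A quick check before running the app locally with streamlit run app.py might look like the sketch below (assumes a standard local install).

# Sketch: confirm that both the Python graphviz bindings (requirements.txt)
# and the system Graphviz 'dot' executable (packages.txt) are available.
import shutil
import graphviz

print("python-graphviz version:", graphviz.__version__)
print("dot executable on PATH:", shutil.which("dot") is not None)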