research14 committed
Commit • cd1760d
1 Parent(s): cd41e3a
testing lftk
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import lftk
-
+import spacy
 import time
 import os
 import openai
@@ -16,14 +16,27 @@ llama_model = AutoModelForCausalLM.from_pretrained("daryl149/llama-2-7b-chat-hf"

 template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''

-
+def linguistic_features(message):
+    # Load a trained spaCy pipeline
+    nlp = spacy.load("en_core_web_sm")

-
-
-
-
+    # Create a spaCy doc object
+    doc = nlp(message)
+
+    # Initiate LFTK extractor by passing in the doc
+    LFTK_extractor = lftk.Extractor(docs=doc)
+
+    # Customize LFTK extractor (optional)
+    LFTK_extractor.customize(stop_words=True, punctuations=False, round_decimal=3)
+
+    # Use LFTK to dynamically extract handcrafted linguistic features
+    features_to_extract = lftk.search_features(family="wordsent", language="general", return_format="list_key")
+    extracted_features = LFTK_extractor.extract(features=features_to_extract)
+
+    print('Linguistic Features:', extracted_features)
+
+    return extracted_features

-    return linguistic_features

 def update_api_key(new_key):
     global api_key
@@ -138,8 +151,8 @@ def interface():
     #textbox_prompt.submit(llama_respond, inputs=[textbox_prompt, llama_S1_chatbot], outputs=[textbox_prompt, llama_S1_chatbot])

     btn.click(lambda _,
-              message=textbox_prompt: linguistic_features_textbox.update(linguistic_features(
-              inputs=[
+              message=textbox_prompt: linguistic_features_textbox.update(linguistic_features(textbox_prompt)),
+              inputs=[textbox_prompt],
               outputs=[linguistic_features_textbox])

     btn.click(vicuna_respond, inputs=[tab_name, textbox_prompt, vicuna_S1_chatbot],
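For reference, here is a minimal standalone sketch of the extraction flow this commit wires in. It uses the same lftk calls as the new linguistic_features helper; the sample sentence and variable names are only illustrative, and it assumes lftk and spacy are installed and that the en_core_web_sm model has been downloaded (python -m spacy download en_core_web_sm):

import spacy
import lftk

# Load the trained spaCy pipeline and build a doc from a sample sentence
nlp = spacy.load("en_core_web_sm")
doc = nlp("The quick brown fox jumps over the lazy dog.")

# Initiate the LFTK extractor and configure it with the same options used in the commit
extractor = lftk.Extractor(docs=doc)
extractor.customize(stop_words=True, punctuations=False, round_decimal=3)

# Extract the general word/sentence-level feature family and print the resulting features
features_to_extract = lftk.search_features(family="wordsent", language="general", return_format="list_key")
print(extractor.extract(features=features_to_extract))

One note on the new click wiring: the lambda receives the submitted textbox value as its first argument but appears to call linguistic_features(textbox_prompt), passing the Textbox component object rather than the string. If that turns out to be unintended, the conventional Gradio pattern of handing the function itself to the event, roughly btn.click(fn=linguistic_features, inputs=[textbox_prompt], outputs=[linguistic_features_textbox]), would let Gradio pass the value in and write the returned features to the output box; treat this as a sketch rather than a drop-in replacement for the committed code.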