Spaces: Runtime error

hannahisrael03 committed
Commit bd9abb2 • Parent(s): 94b6be6

Upload 4 files

- app.py +76 -0
- model_functions.py +102 -0
- preprocessor.py +94 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,76 @@
from model_functions import *
from preprocessor import *
import streamlit as st
import pandas as pd


def main():
    st.title("WhatsApp Analysis Tool")
    st.markdown("This app summarizes WhatsApp chats and provides named entity recognition as well as sentiment analysis for the conversation.")
    st.markdown("**NOTE**: *This app can only process chats exported from iOS, as the exported chat format differs from Android's.*")
    st.markdown("Download your WhatsApp chat by going to Settings > Chats > Export Chat, then select the chat you want to summarize (choose 'Without Media').")

    # File uploader
    uploaded_file = st.file_uploader("Choose a file (.zip)", type=['zip'])
    if uploaded_file is not None:
        file_type = detect_file_type(uploaded_file.name)
        if file_type == "zip":
            # Process the file
            data = preprocess_whatsapp_messages(uploaded_file, file_type)
            if data.empty:
                st.write("No messages found or the file could not be processed.")
            else:
                # Date selector
                date_options = data['date'].dt.strftime('%Y-%m-%d').unique()
                selected_date = st.selectbox("Select a date for analysis:", date_options)

                if selected_date:
                    text_for_analysis = get_dated_input(data, selected_date)
                    with st.expander("Show/Hide Original Conversation"):
                        st.markdown(f"```\n{text_for_analysis}\n```")
                    process = st.button('Process')
                    if process:
                        # Load models
                        tokenizer_sentiment, model_sentiment = load_sentiment_analyzer()
                        tokenizer_summary, model_summary = load_summarizer()
                        pipe_ner = load_NER()

                        # Perform analysis
                        sentiment = get_sentiment_analysis(text_for_analysis, tokenizer_sentiment, model_sentiment)
                        summary = generate_summary(text_for_analysis, tokenizer_summary, model_summary)
                        ner_results = get_NER(text_for_analysis, pipe_ner)

                        # Display results
                        st.subheader("Sentiment Analysis")
                        st.write("Sentiment:", sentiment)

                        st.subheader("Summary")
                        st.write("Summary:", summary)

                        st.subheader("Named Entity Recognition")
                        ner_df = pd.DataFrame(ner_results, columns=["Word", "Entity Group"])
                        st.write(ner_df)
        else:
            st.error("Unsupported file type. Please upload a .zip file.")
    else:
        st.info("Please upload a file to proceed.")


if __name__ == "__main__":
    main()
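For quick local testing outside the Streamlit UI, a minimal sketch along these lines exercises the same pipeline end to end. The file name chat.zip, the choice of the first available date, and running this as a standalone script are assumptions for illustration, not part of the commit.

# Hypothetical headless test script (not part of the commit).
# Assumes app.py's sibling modules are importable and "chat.zip" is a local iOS WhatsApp export.
from model_functions import (load_sentiment_analyzer, load_summarizer, load_NER,
                             get_sentiment_analysis, generate_summary, get_NER)
from preprocessor import preprocess_whatsapp_messages, get_dated_input

data = preprocess_whatsapp_messages("chat.zip", "zip")
if not data.empty:
    # Pick the first available date just for the demo
    first_date = data['date'].dt.strftime('%Y-%m-%d').iloc[0]
    text = get_dated_input(data, first_date)

    tokenizer_s, model_s = load_sentiment_analyzer()
    tokenizer_sum, model_sum = load_summarizer()
    pipe_ner = load_NER()

    print("Sentiment:", get_sentiment_analysis(text, tokenizer_s, model_s))
    print("Summary:", generate_summary(text, tokenizer_sum, model_sum))
    print("Entities:", get_NER(text, pipe_ner))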
model_functions.py
ADDED
@@ -0,0 +1,102 @@
import torch
from transformers import (AutoModelForSequenceClassification, AutoModelForSeq2SeqLM,
                          AutoConfig, AutoModelForTokenClassification,
                          AutoTokenizer, pipeline)
from peft import PeftModel, PeftConfig


def load_sentiment_analyzer():
    tokenizer = AutoTokenizer.from_pretrained("aliciiavs/sentiment-analysis-whatsapp2")
    model = AutoModelForSequenceClassification.from_pretrained("aliciiavs/sentiment-analysis-whatsapp2")
    return tokenizer, model


def load_summarizer():
    config = PeftConfig.from_pretrained("marcelomoreno26/bart-large-samsum-adapter")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large")
    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
    tokenizer.pad_token = tokenizer.eos_token
    model = PeftModel.from_pretrained(model, "marcelomoreno26/bart-large-samsum-adapter", config=config)
    model = model.merge_and_unload()
    return tokenizer, model


def load_NER():
    config = AutoConfig.from_pretrained("hannahisrael03/distilbert-base-uncased-finetuned-wikiann")
    model = AutoModelForTokenClassification.from_pretrained("hannahisrael03/distilbert-base-uncased-finetuned-wikiann", config=config)
    tokenizer = AutoTokenizer.from_pretrained("hannahisrael03/distilbert-base-uncased-finetuned-wikiann")
    pipe = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average")
    return pipe


def get_sentiment_analysis(text, tokenizer, model):
    inputs = tokenizer(text, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Get predicted probabilities and the predicted label
    probabilities = torch.softmax(outputs.logits, dim=1)
    predicted_label = torch.argmax(probabilities, dim=1)
    # Convert the predicted label tensor to a Python integer
    predicted_label = predicted_label.item()
    # Map the predicted label index to a sentiment label
    label_dic = {0: 'sadness', 1: 'joy', 2: 'love', 3: 'anger', 4: 'fear', 5: 'surprise'}
    return label_dic[predicted_label]


def generate_summary(text, tokenizer, model):
    prefix = "summarize: "
    encoded_input = tokenizer.encode_plus(prefix + text, return_tensors='pt', add_special_tokens=True)
    input_ids = encoded_input['input_ids']

    # Check whether input_ids exceed the model's max length
    max_length = 512
    if input_ids.shape[1] > max_length:
        # Split the input_ids into manageable segments
        total_summary = []
        # Step by max_length - 50 so consecutive segments overlap slightly and retain some context
        for i in range(0, input_ids.shape[1], max_length - 50):
            segment_ids = input_ids[:, i:i + max_length]
            output_ids = model.generate(segment_ids, max_length=150, num_beams=5, early_stopping=True)
            segment_summary = tokenizer.decode(output_ids[0], skip_special_tokens=True)
            total_summary.append(segment_summary)

        # Concatenate all segment summaries
        summary = ' '.join(total_summary)
    else:
        # Input fits in a single pass
        output_ids = model.generate(input_ids, max_length=150, num_beams=5, early_stopping=True)
        summary = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    return summary


def get_NER(text, pipe):
    # Use the pipeline to predict named entities
    results = pipe(text)
    # Filter duplicates while retaining the highest score for each (entity type, word) combination
    unique_entities = {}
    for ent in results:
        key = (ent['entity_group'], ent['word'])
        if key not in unique_entities or unique_entities[key]['score'] < ent['score']:
            unique_entities[key] = ent

    # Sort by start position to preserve the order entities appear in the text
    sorted_entities = sorted(unique_entities.values(), key=lambda x: x['start'])
    # Format the results for a table display
    formatted_results = [[ent['word'], ent['entity_group']] for ent in sorted_entities]
    # Keep ORG entities only when they are at most two words long; keep other entities unfiltered
    filtered_results = []
    for entity in formatted_results:
        if entity[1] == 'ORG':
            if len(entity[0].split()) <= 2:
                filtered_results.append(entity)
        else:
            filtered_results.append(entity)

    return filtered_results
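To make the NER post-processing concrete, here is a small self-contained illustration of the dedup-and-filter step in get_NER, run on hand-written, pipeline-style results (the entities are invented for the demo, so no model download is needed):

# Illustration only: the sample entities below are made up.
sample = [
    {'entity_group': 'PER', 'word': 'alice', 'score': 0.98, 'start': 0},
    {'entity_group': 'PER', 'word': 'alice', 'score': 0.91, 'start': 40},   # duplicate, lower score
    {'entity_group': 'ORG', 'word': 'acme corp', 'score': 0.88, 'start': 10},
    {'entity_group': 'ORG', 'word': 'the very long company name', 'score': 0.80, 'start': 20},  # dropped: more than two words
]

# Keep the highest-scoring occurrence of each (entity type, word) pair
unique = {}
for ent in sample:
    key = (ent['entity_group'], ent['word'])
    if key not in unique or unique[key]['score'] < ent['score']:
        unique[key] = ent

# Sort by position, then drop ORG entities longer than two words
rows = [[e['word'], e['entity_group']] for e in sorted(unique.values(), key=lambda x: x['start'])]
rows = [r for r in rows if r[1] != 'ORG' or len(r[0].split()) <= 2]
print(rows)  # [['alice', 'PER'], ['acme corp', 'ORG']]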
preprocessor.py
ADDED
@@ -0,0 +1,94 @@
import pandas as pd
import zipfile
import re
from io import BytesIO


def detect_file_type(file_path):
    extension = file_path[-3:]
    if extension in ["txt", "zip"]:
        return extension
    else:
        return "unknown"


def preprocess_whatsapp_messages(file_path, file_type):
    """
    Preprocesses a WhatsApp chat export (zip or txt) into a pandas DataFrame; all messages
    from one day go into a single row, keyed by date.

    Args:
        file_path: Location or file-like object of the exported conversation.
        file_type (str): "zip" or "txt".

    Returns:
        pd.DataFrame: One row per date with columns ['date', 'text'].
    """
    # Load the zip file and extract the text data
    if file_type == "zip":
        with zipfile.ZipFile(file_path, 'r') as z:
            file_name = z.namelist()[0]
            with z.open(file_name) as file:
                text_data = file.read().decode('utf-8')
    else:
        text_data = BytesIO(file_path.getvalue()).read().decode('utf-8')

    # Split the text data into lines
    lines = text_data.strip().split('\n')

    # Create a DataFrame
    df = pd.DataFrame(lines, columns=['message'])

    # Process each line to separate timestamp and text
    df[['timestamp', 'text']] = df['message'].str.split(']', n=1, expand=True)
    df['timestamp'] = df['timestamp'].str.strip('[')

    # Handle cases where the split did not work (e.g., missing ']' in a line)
    df.dropna(subset=['timestamp', 'text'], inplace=True)

    # Convert the timestamp to datetime and keep only the date
    df['timestamp'] = pd.to_datetime(df['timestamp'], format='%d/%m/%y, %H:%M:%S', errors='coerce').dt.date

    # Drop rows where the timestamp conversion failed (NaT)
    df.dropna(subset=['timestamp'], inplace=True)

    # Remove initial WhatsApp system messages in English and Spanish
    filter_text_en = "Your messages and calls are end-to-end encrypted"
    filter_text_es = "Los mensajes y las llamadas están cifrados de extremo a extremo"
    df = df[~df['text'].str.contains(filter_text_en, na=False)]
    df = df[~df['text'].str.contains(filter_text_es, na=False)]

    # Additional preprocessing: remove URLs and convert text to lowercase
    df['text'] = df['text'].apply(lambda x: re.sub(r'https?:\/\/\S+', '', x))  # Remove URLs
    df['text'] = df['text'].apply(lambda x: x.lower())  # Convert text to lowercase

    # Remove media placeholders and edit markers while preserving colons after sender names
    df['text'] = df['text'].apply(lambda x: re.sub(r'(?<!\w)(:\s|\s:\s|\s:)', '', x))  # Remove colons that are not part of a sender's name
    df['text'] = df['text'].apply(lambda x: re.sub(r'\[image omitted\]', '', x))  # Remove images
    df['text'] = df['text'].apply(lambda x: re.sub(r'\[sticker omitted\]', '', x))  # Remove stickers
    df['text'] = df['text'].apply(lambda x: re.sub(r'\[document omitted\]', '', x))  # Remove documents
    df['text'] = df['text'].apply(lambda x: re.sub(r'<se editó este mensaje.>', '', x))  # Remove the Spanish "message edited" marker
    df['text'] = df['text'].apply(lambda x: re.sub(r'<this message was edited.>', '', x))  # Remove the English "message edited" marker

    # Group by date and concatenate all messages from the same date
    df = df.groupby('timestamp')['text'].apply(lambda x: '\n'.join(x)).reset_index()
    df.columns = ['date', 'text']
    df['date'] = pd.to_datetime(df['date'])
    df['text'] = df['text'].astype(str)

    return df


def get_dated_input(data, selected_date):
    """
    Extracts the concatenated conversation text for the selected date from the preprocessed DataFrame.

    :param data: DataFrame returned by preprocess_whatsapp_messages.
    :param selected_date: Date string selected in the UI.
    :return: The conversation text for that date.
    """
    selected_date = pd.to_datetime(selected_date)
    data_for_model = data[data['date'].dt.date == selected_date.date()]
    first_row_text = data_for_model['text'].iloc[0]
    return first_row_text
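As a sanity check of the parsing logic, the following self-contained snippet mirrors the timestamp/text split on a few invented lines in the iOS export format the preprocessor assumes (day-first dates, 24-hour time):

# Illustration only: the sample chat lines are made up.
import pandas as pd

sample_lines = [
    "[25/12/23, 14:05:32] alice: see you at the party",
    "[25/12/23, 14:06:01] bob: https://example.com check this",
    "26/12/23 a line without a closing bracket is dropped",
]
df = pd.DataFrame(sample_lines, columns=['message'])
# Split on the first ']' into timestamp and text, as in preprocess_whatsapp_messages
df[['timestamp', 'text']] = df['message'].str.split(']', n=1, expand=True)
df['timestamp'] = df['timestamp'].str.strip('[')
df.dropna(subset=['timestamp', 'text'], inplace=True)
df['timestamp'] = pd.to_datetime(df['timestamp'], format='%d/%m/%y, %H:%M:%S', errors='coerce').dt.date
df.dropna(subset=['timestamp'], inplace=True)
print(df[['timestamp', 'text']])  # two rows survive; the malformed line is discarded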
requirements.txt
ADDED
@@ -0,0 +1,5 @@
torch==2.2.2
pandas==2.2.2
transformers==4.39.3
streamlit==1.33.0
git+https://github.com/huggingface/peft.git