HalteroXHunter committed
Commit
94c45be
1 Parent(s): a3d45b2
Files changed (5)
  1. absa_evaluator.py +166 -0
  2. app.py +5 -0
  3. gradio_tst.py +130 -0
  4. preprocessing.py +115 -0
  5. requirements.txt +4 -0
absa_evaluator.py ADDED
@@ -0,0 +1,166 @@
+ from typing import Dict, List
+
+ import evaluate
+ from datasets import Features, Sequence, Value
+ from sklearn.metrics import accuracy_score
+
+ from preprocessing import absa_term_preprocess
+
+ _CITATION = """
+ """
+
+ _DESCRIPTION = """
+ Evaluation metrics for Aspect-Based Sentiment Analysis (ABSA): precision, recall, and F1 score for aspect-term extraction and category detection, plus accuracy for the corresponding polarities.
+ """
+
+ _KWARGS_DESCRIPTION = """
+ Computes precision, recall, and F1 score for aspect terms and categories, and accuracy for their polarities, in Aspect-Based Sentiment Analysis (ABSA).
+
+ Args:
+     predictions: List of ABSA predictions with the following structure:
+         - 'aspects': Aspect annotations with the following keys:
+             - 'term': Sequence of aspect terms
+             - 'polarity': Sequence of polarities, one per term
+         - 'category': Category annotations with the following keys:
+             - 'category': Sequence of aspect categories
+             - 'polarity': Sequence of polarities, one per category
+     references: List of ABSA references with the same structure as predictions.
+ Returns:
+     term_extraction_results: Precision, recall, and F1 score for aspect terms
+     term_polarity_results_accuracy: Accuracy of the aspect-term polarities
+     category_detection_results: Precision, recall, and F1 score for aspect categories
+     category_polarity_results_accuracy: Accuracy of the category polarities
+ """
+
+
+ class AbsaEvaluatorTest(evaluate.Metric):
+     def _info(self):
+         return evaluate.MetricInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=Features(
+                 {
+                     "predictions": Features(
+                         {
+                             "aspects": Features(
+                                 {
+                                     "term": Sequence(Value("string")),
+                                     "polarity": Sequence(Value("string")),
+                                 }
+                             ),
+                             "category": Features(
+                                 {
+                                     "category": Sequence(Value("string")),
+                                     "polarity": Sequence(Value("string")),
+                                 }
+                             ),
+                         }
+                     ),
+                     "references": Features(
+                         {
+                             "aspects": Features(
+                                 {
+                                     "term": Sequence(Value("string")),
+                                     "polarity": Sequence(Value("string")),
+                                 }
+                             ),
+                             "category": Features(
+                                 {
+                                     "category": Sequence(Value("string")),
+                                     "polarity": Sequence(Value("string")),
+                                 }
+                             ),
+                         }
+                     ),
+                 }
+             ),
+         )
+
+     def _compute(self, predictions, references):
+         # Preprocess the aspect-term subtask: terms stay nested per sample
+         # (for semeval_metric), polarities come back flattened (for accuracy_score).
+         (
+             truth_aspect_terms,
+             pred_aspect_terms,
+             truth_term_polarities,
+             pred_term_polarities,
+         ) = absa_term_preprocess(
+             references=references,
+             predictions=predictions,
+             subtask_key="aspects",
+             subtask_value="term",
+         )
+         # Evaluate term extraction and term-polarity accuracy
+         term_results = self.semeval_metric(
+             truth_aspect_terms, pred_aspect_terms
+         )
+         term_polarity_acc = accuracy_score(
+             truth_term_polarities, pred_term_polarities
+         )
+
+         # Preprocess the category-detection subtask
+         (
+             truth_categories,
+             pred_categories,
+             truth_cat_polarities,
+             pred_cat_polarities,
+         ) = absa_term_preprocess(
+             references=references,
+             predictions=predictions,
+             subtask_key="category",
+             subtask_value="category",
+         )
+
+         # Evaluate category detection and category-polarity accuracy
+         category_results = self.semeval_metric(
+             truth_categories, pred_categories
+         )
+         cat_polarity_acc = accuracy_score(
+             truth_cat_polarities, pred_cat_polarities
+         )
+
+         return {
+             "term_extraction_results": term_results,
+             "term_polarity_results_accuracy": term_polarity_acc,
+             "category_detection_results": category_results,
+             "category_polarity_results_accuracy": cat_polarity_acc,
+         }
+
+     def semeval_metric(
+         self, truths: List[List[str]], preds: List[List[str]]
+     ) -> Dict[str, float]:
+         """
+         Implements evaluation for extraction tasks using precision, recall, and F1 score.
+
+         Parameters:
+         - truths: List of lists, where each inner list contains the ground-truth labels for a sample.
+         - preds: List of lists, where each inner list contains the predicted labels for a sample.
+
+         Returns:
+         - A dictionary containing the precision, recall, F1 score, and counts of common, retrieved, and relevant labels.
+
+         Adapted from: https://github.com/davidsbatista/Aspect-Based-Sentiment-Analysis/blob/1d9c8ec1131993d924e96676fa212db6b53cb870/libraries/baselines.py#L387
+         """
+         b = 1  # F-beta weight; b == 1 yields the standard F1 score
+         common, relevant, retrieved = 0.0, 0.0, 0.0
+         for truth, pred in zip(truths, preds):
+             common += len([a for a in pred if a in truth])
+             retrieved += len(pred)
+             relevant += len(truth)
+         precision = common / retrieved if retrieved > 0 else 0.0
+         recall = common / relevant if relevant > 0 else 0.0
+         f1 = (
+             (1 + (b**2))
+             * precision
+             * recall
+             / ((precision * b**2) + recall)
+             if precision > 0 and recall > 0
+             else 0.0
+         )
+         return {
+             "precision": precision,
+             "recall": recall,
+             "f1_score": f1,
+             "common": common,
+             "retrieved": retrieved,
+             "relevant": relevant,
+         }
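
For context, a minimal usage sketch of this metric; the review data is invented, and the module is assumed to be available locally as absa_evaluator.py (app.py below loads it the same way):

import evaluate

metric = evaluate.load("absa_evaluator.py")  # load the local module added in this commit

# One toy example: the prediction misses the "screen" aspect, so preprocessing.py
# pads the predicted terms with "NONE" and the missing polarity with a random choice.
predictions = [{
    "aspects": {"term": ["battery life"], "polarity": ["positive"]},
    "category": {"category": ["LAPTOP#GENERAL"], "polarity": ["positive"]},
}]
references = [{
    "aspects": {"term": ["battery life", "screen"], "polarity": ["positive", "negative"]},
    "category": {"category": ["LAPTOP#GENERAL"], "polarity": ["positive"]},
}]

results = metric.compute(predictions=predictions, references=references)
# Term extraction: common = 1, retrieved = 2 (including the "NONE" pad), relevant = 2,
# so precision = recall = f1_score = 0.5. The term-polarity accuracy is nondeterministic
# here because the padded polarity is drawn at random.
print(results["term_extraction_results"])
print(results["term_polarity_results_accuracy"])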
app.py ADDED
@@ -0,0 +1,5 @@
+ import evaluate
+ from gradio_tst import launch_gradio_widget2
+
+ module = evaluate.load("absa_evaluator.py")
+ launch_gradio_widget2(module)
gradio_tst.py ADDED
@@ -0,0 +1,130 @@
+ import json
+ import logging
+ import os
+ import re
+ import sys
+ from pathlib import Path
+
+ import numpy as np
+ from datasets import Value
+
+ REGEX_YAML_BLOCK = re.compile(r"---[\n\r]+([\S\s]*?)[\n\r]+---[\n\r]")
+
+
+ def infer_gradio_input_types(feature_types):
+     """
+     Maps metric feature types to input types for gradio Dataframes:
+     - float/int -> numbers
+     - string -> strings
+     - any other -> json
+     Note that json is not a native gradio type but will be treated as a string
+     that is then parsed as JSON.
+     """
+     input_types = []
+     for feature_type in feature_types:
+         input_type = "json"
+         if isinstance(feature_type, Value):
+             if feature_type.dtype.startswith("int") or feature_type.dtype.startswith("float"):
+                 input_type = "number"
+             elif feature_type.dtype == "string":
+                 input_type = "str"
+         input_types.append(input_type)
+     return input_types
+
+
+ def json_to_string_type(input_types):
+     """Maps the json input type to str."""
+     return ["str" if i == "json" else i for i in input_types]
+
+
+ def parse_readme(filepath):
+     """Parses a repository's README and strips the leading YAML front-matter block."""
+     if not os.path.exists(filepath):
+         return "No README.md found."
+     with open(filepath, "r") as f:
+         text = f.read()
+         match = REGEX_YAML_BLOCK.search(text)
+         if match:
+             text = text[match.end() :]
+     return text
+
+
+ def parse_gradio_data(data, input_types):
+     """Parses data from the gradio Dataframe for use in the metric."""
+     metric_inputs = {}
+     data.replace("", np.nan, inplace=True)
+     data.dropna(inplace=True)
+     for feature_name, input_type in zip(data, input_types):
+         if input_type == "json":
+             metric_inputs[feature_name] = [json.loads(d) for d in data[feature_name].to_list()]
+         elif input_type == "str":
+             metric_inputs[feature_name] = [d.strip('"') for d in data[feature_name].to_list()]
+         else:
+             metric_inputs[feature_name] = data[feature_name]
+     return metric_inputs
+
+
+ def parse_test_cases(test_cases, feature_names, input_types):
+     """
+     Parses test cases to be used in the gradio Dataframe. Note that quotes are
+     added around strings to follow the JSON format.
+     """
+     if len(test_cases) == 0:
+         return None
+     examples = []
+     for test_case in test_cases:
+         parsed_cases = []
+         for feat, input_type in zip(feature_names, input_types):
+             if input_type == "json":
+                 parsed_cases.append([str(element) for element in test_case[feat]])
+             elif input_type == "str":
+                 parsed_cases.append(['"' + element + '"' for element in test_case[feat]])
+             else:
+                 parsed_cases.append(test_case[feat])
+         examples.append([list(i) for i in zip(*parsed_cases)])
+     return examples
+
+
+ def launch_gradio_widget2(metric):
+     """Launches the `metric` widget with Gradio."""
+     try:
+         import gradio as gr
+     except ImportError as error:
+         logging.error("To create a metric widget with Gradio make sure gradio is installed.")
+         raise error
+
+     local_path = Path(sys.path[0])
+     # If there are several input types, use the first as the default.
+     if isinstance(metric.features, list):
+         (feature_names, feature_types) = zip(*metric.features[0].items())
+     else:
+         (feature_names, feature_types) = zip(*metric.features.items())
+     gradio_input_types = infer_gradio_input_types(feature_types)
+
+     def compute(data):
+         return metric.compute(**parse_gradio_data(data, gradio_input_types))
+
+     iface = gr.Interface(
+         fn=compute,
+         inputs=gr.Dataframe(
+             headers=list(feature_names),
+             col_count=len(feature_names),
+             row_count=1,
+             datatype=json_to_string_type(gradio_input_types),
+         ),
+         outputs=gr.Textbox(label=metric.name),
+         description=(
+             metric.info.description + "\nIf this is a text-based metric, make sure to wrap your input in double quotes."
+             " Alternatively you can use a JSON-formatted list as input."
+         ),
+         title=f"Metric: {metric.name}",
+         article=parse_readme(local_path / "README.md"),
+         # TODO: load test cases and use them to populate examples
+         # examples=[parse_test_cases(test_cases, feature_names, gradio_input_types)]
+     )
+
+     iface.launch()
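
A rough illustration of the parsing path in launch_gradio_widget2; the cell string below is hypothetical but matches the metric's nested schema, which infer_gradio_input_types maps to the "json" input type:

import pandas as pd

from gradio_tst import parse_gradio_data

# Two Dataframe columns, one row; each cell holds a JSON object because the
# ABSA features are nested (not plain Values), so their input type is "json".
cell = (
    '{"aspects": {"term": ["battery life"], "polarity": ["positive"]}, '
    '"category": {"category": ["LAPTOP#GENERAL"], "polarity": ["positive"]}}'
)
data = pd.DataFrame({"predictions": [cell], "references": [cell]})

metric_inputs = parse_gradio_data(data, ["json", "json"])
# metric_inputs now maps each column name to a list of parsed dicts,
# ready to be passed along as metric.compute(**metric_inputs).
print(metric_inputs["predictions"][0]["aspects"]["term"])  # ['battery life']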
preprocessing.py ADDED
@@ -0,0 +1,115 @@
+ from itertools import chain
+ from random import choice
+ from typing import Dict, List
+
+
+ def adjust_predictions(refs, preds, choices):
+     """Adjust each prediction list to the length of its reference list: pad short
+     lists with a (possibly random) choice and trim over-long ones."""
+     adjusted_preds = []
+     for ref, pred in zip(refs, preds):
+         if len(pred) < len(ref):
+             missing_count = len(ref) - len(pred)
+             pred = pred + [choice(choices) for _ in range(missing_count)]
+         elif len(pred) > len(ref):
+             # Trim so the flattened polarity lists stay aligned;
+             # accuracy_score requires equal-length inputs.
+             pred = pred[: len(ref)]
+         adjusted_preds.append(pred)
+     return adjusted_preds
+
+
+ def extract_aspects(data, specific_key, specific_val):
+     """Extracts and returns a list of specified aspect details from the nested data."""
+     return [item[specific_key][specific_val] for item in data]
+
+
+ def absa_term_preprocess(references, predictions, subtask_key, subtask_value):
+     """
+     Preprocess terms and polarities for aspect-based sentiment analysis.
+
+     Args:
+         references (List[Dict]): Dictionaries containing the gold terms and polarities under `subtask_key`.
+         predictions (List[Dict]): Dictionaries with the same structure containing the predictions.
+         subtask_key (str): Top-level key of the subtask, e.g. "aspects" or "category".
+         subtask_value (str): Key of the value to extract, e.g. "term" or "category".
+
+     Returns:
+         Tuple[List[List[str]], List[List[str]], List[str], List[str]]: Per-sample lists of true and
+         adjusted predicted terms (for the extraction metric), and flattened lists of true and
+         adjusted predicted polarities (for the accuracy metric).
+     """
+     # Extract aspect terms and polarities
+     truth_aspect_terms = extract_aspects(references, subtask_key, subtask_value)
+     pred_aspect_terms = extract_aspects(predictions, subtask_key, subtask_value)
+     truth_polarities = extract_aspects(references, subtask_key, "polarity")
+     pred_polarities = extract_aspects(predictions, subtask_key, "polarity")
+
+     # Define adjustment parameters
+     special_token = "NONE"  # For missing aspect terms
+     sentiment_choices = [
+         "positive",
+         "negative",
+         "neutral",
+         "conflict",
+     ]  # For missing polarities
+
+     # Adjust the predictions to match the length of the references
+     adjusted_pred_terms = adjust_predictions(
+         truth_aspect_terms, pred_aspect_terms, [special_token]
+     )
+     adjusted_pred_polarities = adjust_predictions(
+         truth_polarities, pred_polarities, sentiment_choices
+     )
+
+     # Keep the term lists nested (semeval_metric expects one list per sample)
+     # and flatten only the polarity lists (accuracy_score expects flat lists).
+     return (
+         truth_aspect_terms,
+         adjusted_pred_terms,
+         flatten_list(truth_polarities),
+         flatten_list(adjusted_pred_polarities),
+     )
+
+
+ def flatten_list(nested_list):
+     """Flatten a nested list into a single-level list."""
+     return list(chain.from_iterable(nested_list))
+
+
+ def extract_pred_terms(
+     all_predictions: List[Dict[str, Dict[str, str]]]
+ ) -> List[List]:
+     """Extract and organize predicted terms from the sentiment analysis results."""
+     pred_aspect_terms = []
+     for pred in all_predictions:
+         terms = [term for cat in pred.values() for term in cat.keys()]
+         pred_aspect_terms.append(terms)
+     return pred_aspect_terms
+
+
+ def merge_aspects_and_categories(aspects, categories):
+     """Merge parallel aspect and category annotations into the evaluator's input format."""
+     result = []
+
+     # Assuming both lists are of the same length and corresponding indices match
+     for aspect, category in zip(aspects, categories):
+         combined_entry = {
+             "aspects": {"term": [], "polarity": []},
+             "category": {"category": [], "polarity": []},
+         }
+
+         # Process aspect entries
+         for cat_key, terms_dict in aspect.items():
+             for term, polarity in terms_dict.items():
+                 combined_entry["aspects"]["term"].append(term)
+                 combined_entry["aspects"]["polarity"].append(polarity)
+
+             # Add category details based on the aspect's key if available in categories
+             if cat_key in category:
+                 combined_entry["category"]["category"].append(cat_key)
+                 combined_entry["category"]["polarity"].append(category[cat_key])
+
+         # Ensure all keys in category are accounted for
+         for cat_key, polarity in category.items():
+             if cat_key not in combined_entry["category"]["category"]:
+                 combined_entry["category"]["category"].append(cat_key)
+                 combined_entry["category"]["polarity"].append(polarity)
+
+         result.append(combined_entry)
+
+     return result
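
A small sketch of what absa_term_preprocess produces for the aspect-term subtask (toy data; the randomly filled polarity makes the final value nondeterministic):

from preprocessing import absa_term_preprocess

# Toy sample: the prediction misses one aspect, so it is padded to length.
references = [{"aspects": {"term": ["food", "service"], "polarity": ["positive", "negative"]}}]
predictions = [{"aspects": {"term": ["food"], "polarity": ["positive"]}}]

truth_terms, pred_terms, truth_pols, pred_pols = absa_term_preprocess(
    references, predictions, subtask_key="aspects", subtask_value="term"
)
print(truth_terms)  # [['food', 'service']] -- per-sample lists for semeval_metric
print(pred_terms)   # [['food', 'NONE']]    -- short prediction padded with the special token
print(truth_pols)   # ['positive', 'negative'] -- flattened for accuracy_score
print(pred_pols)    # ['positive', ...]        -- missing polarity filled with a random sentiment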
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ evaluate
+ datasets
+ scikit-learn
+ gradio