Matthew Franglen committed · Commit d136bc8 · 1 Parent(s): 38ef30f

Copy over the code from the blog post

Browse files:
- src/__init__.py (+0 -0)
- src/convert.py (+331 -0)
src/__init__.py
ADDED: file without changes

src/convert.py
ADDED: @@ -0,0 +1,331 @@
import ast
import re
from dataclasses import dataclass
from typing import Optional, TypedDict

import Levenshtein
import pandas as pd


def read_sem_eval_file(file: str) -> pd.DataFrame:
    df = pd.read_xml(file)[["text"]]
    return df

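# Hedged note (not from the source): this assumes the SemEval XML nests one
# <text> element per <sentence>, which pandas.read_xml flattens into a "text"
# column; every other column is discarded.
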
def read_aste_file(file: str) -> pd.DataFrame:
    def triple_to_hashable(
        triple: tuple[list[int], list[int], str]
    ) -> tuple[tuple[int, ...], tuple[int, ...], str]:
        aspect_span, opinion_span, sentiment = triple
        return tuple(aspect_span), tuple(opinion_span), sentiment

    df = pd.read_csv(
        file,
        sep="####",
        header=None,
        names=["text", "triples"],
        engine="python",
    )

    # There are duplicate rows, some of which have the same triples and some don't.
    # This deals with that by:
    # * first dropping the pure duplicates,
    # * then parsing the triples and exploding them to one per row,
    # * then dropping the exploded duplicates (the triples have to be converted
    #   to hashable tuples for this),
    # * then grouping the triples up again,
    # * finally sorting the distinct triples.
    df = df.drop_duplicates()
    df["triples"] = df.triples.apply(ast.literal_eval)
    df = df.explode("triples")
    df["triples"] = df.triples.apply(triple_to_hashable)
    df = df.drop_duplicates()
    df = df.groupby("text").agg(list)
    df = df.reset_index(drop=False)
    df["triples"] = df.triples.apply(set).apply(sorted)

    return df

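# A hedged sketch of the line format read_aste_file assumes: the raw sentence,
# then "####", then a Python-literal list of word-level triples, e.g.
#
#   The food was great but the service was slow .####[([1], [3], 'POS'), ([6], [8], 'NEG')]
#
# Each triple is (aspect word indices, opinion word indices, sentiment), which
# ast.literal_eval parses before the dedup-and-regroup steps above.
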
def get_original_text(
    aste_file: str,
    sem_eval_file: str,
    debug: bool = False,
) -> pd.DataFrame:
    approximate_matches = 0

    def best_match(text: str) -> str:
        comparison = text.replace(" ", "")
        if comparison in comparison_to_text:
            return comparison_to_text[comparison]

        nonlocal approximate_matches
        approximate_matches += 1
        distances = sem_eval_comparison.apply(
            lambda se_comparison: Levenshtein.distance(comparison, se_comparison)
        )
        best = sem_eval_df.iloc[distances.argmin()].text
        return best

    sem_eval_df = read_sem_eval_file(sem_eval_file)
    sem_eval_comparison = sem_eval_df.text.str.replace(" ", "")
    comparison_to_text = dict(zip(sem_eval_comparison, sem_eval_df.text))

    aste_df = read_aste_file(aste_file)
    aste_df = aste_df.rename(columns={"text": "preprocessed_text"})
    aste_df["text"] = aste_df.preprocessed_text.apply(best_match)
    if debug:
        print(f"Read {len(aste_df):,} rows")
        print(f"Had to use {approximate_matches:,} approximate matches")
    return aste_df[["text", "preprocessed_text", "triples"]]

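# Hedged illustration of the matching idea: the tokenized ASTE text
# "do n't like it" strips to "don'tlikeit", which matches the stripped form of
# the original SemEval text "don't like it" exactly; only when no stripped
# form matches does best_match fall back to the nearest SemEval sentence by
# Levenshtein distance.
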
def edit(original: str, preprocessed: str) -> list[Optional[int]]:
    indices: list[Optional[int]] = list(range(len(preprocessed)))
    for operation, _source_position, destination_position in Levenshtein.editops(
        preprocessed, original
    ):
        if operation == "replace":
            indices[destination_position] = None
        elif operation == "insert":
            indices.insert(destination_position, None)
        elif operation == "delete":
            del indices[destination_position]
    return indices

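# A small worked example (assumed, not from the source): with
#   preprocessed = "do n't"  and  original = "don't"
# Levenshtein.editops("do n't", "don't") yields one delete at position 2 (the
# space), so edit returns [0, 1, 3, 4, 5]: entry i is the index in the
# preprocessed text of original character i, and None marks characters that
# have no clean counterpart (replacements and insertions).
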
def has_unmapped(indices: list[Optional[int]]) -> bool:
    return any(index is None for index in indices)


def has_unmapped_non_space(row: pd.Series) -> bool:
    letter_and_index: list[tuple[str, Optional[int]]] = list(
        zip(row.text, row.text_indices)
    )
    return any(index is None for letter, index in letter_and_index if letter != " ")


@dataclass(frozen=True)
class WordSpan:
    start_index: int
    end_index: int  # this is the letter after the end


class CharacterIndices(TypedDict):
    aspect_start_index: int
    aspect_end_index: int
    aspect_term: str
    opinion_start_index: int
    opinion_end_index: int
    opinion_term: str
    sentiment: str


word_pattern = re.compile(r"\S+")

def row_to_character_indices(row: pd.Series) -> pd.Series:
    try:
        return pd.Series(
            to_character_indices(
                triplet=row.triples,
                preprocessed=row.preprocessed_text,
                text=row.text,
                text_indices=row.text_indices,
            )
        )
    except Exception:
        print(f"failed to process row {row.name}")
        print(row)
        raise

def to_character_indices(
    *,
    triplet: tuple[tuple[int, ...], tuple[int, ...], str],
    preprocessed: str,
    text: str,
    text_indices: list[Optional[int]],
) -> CharacterIndices:
    def find_start_index(span: WordSpan) -> int:
        # the starting letter in the lookup can be missing or None
        # this would cause a lookup failure
        # to recover from this we can find the following letter index and backtrack
        for index in range(span.start_index, span.end_index):
            try:
                text_index = text_indices.index(index)
                for _ in range(index - span.start_index):
                    if text_index - 1 <= 0:
                        break
                    if text_indices[text_index - 1] is not None:
                        break
                    text_index -= 1
                return text_index
            except ValueError:
                pass
        # not present in list
        raise ValueError(f"cannot find any part of {span}")

    def find_end_index(span: WordSpan) -> int:
        # the ending letter in the lookup can be missing or None
        # this would cause a lookup failure
        # to recover from this we can find the preceding letter index and backtrack
        for index in range(span.end_index - 1, span.start_index - 1, -1):
            try:
                text_index = text_indices.index(index)
                for _ in range(span.end_index - index):
                    if text_index + 1 >= len(text_indices):
                        break
                    if text_indices[text_index + 1] is not None:
                        break
                    text_index += 1
                return text_index
            except ValueError:
                pass
        # not present in list
        raise ValueError(f"cannot find any part of {span}")

    def to_indices(span: tuple[int, ...]) -> tuple[int, int]:
        word_start = span[0]
        word_start_span = word_indices[word_start]

        word_end = span[-1]
        word_end_span = word_indices[word_end]

        start_index = find_start_index(word_start_span)
        end_index = find_end_index(word_end_span)
        return start_index, end_index

    aspect_span, opinion_span, sentiment = triplet
    assert is_sequential(aspect_span), f"aspect span not sequential: {aspect_span}"
    assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
    assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"

    word_indices = [
        WordSpan(start_index=match.start(), end_index=match.end())
        for match in word_pattern.finditer(preprocessed)
    ]

    aspect_start_index, aspect_end_index = to_indices(aspect_span)
    aspect_term = text[aspect_start_index : aspect_end_index + 1]
    opinion_start_index, opinion_end_index = to_indices(opinion_span)
    opinion_term = text[opinion_start_index : opinion_end_index + 1]

    nice_sentiment = {
        "POS": "positive",
        "NEG": "negative",
        "NEU": "neutral",
    }[sentiment]

    return {
        "aspect_start_index": aspect_start_index,
        "aspect_end_index": aspect_end_index,
        "aspect_term": aspect_term,
        "opinion_start_index": opinion_start_index,
        "opinion_end_index": opinion_end_index,
        "opinion_term": opinion_term,
        "sentiment": nice_sentiment,
    }

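# Hedged example of the output (constructed, not from the source): for the
# triple ([1], [3], 'POS') over the text "The food was great", this returns
# aspect_term "food" (characters 4-7, end index inclusive), opinion_term
# "great" (characters 13-17), and the sentiment "positive".
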
def convert_sem_eval_text(
    aste_file: str,
    sem_eval_file: str,
    debug: bool = False,
) -> pd.DataFrame:
    df = get_original_text(
        aste_file=aste_file,
        sem_eval_file=sem_eval_file,
        debug=debug,
    )
    df = df.explode("triples")
    df = df.reset_index(drop=False)
    df["text_indices"] = df.apply(
        lambda row: edit(original=row.text, preprocessed=row.preprocessed_text),
        axis="columns",
    )
    df = df.merge(
        df.apply(row_to_character_indices, axis="columns"),
        left_index=True,
        right_index=True,
    )
    df = df.drop(columns=["preprocessed_text", "triples", "text_indices"])
    return df

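# Hedged usage sketch (the file paths are hypothetical, not from the source):
#
#   df = convert_sem_eval_text(
#       aste_file="aste/train_triplets.txt",
#       sem_eval_file="semeval/Restaurants_Train.xml",
#       debug=True,
#   )
#
# producing one row per triple, with character-level aspect and opinion spans
# over the recovered original SemEval text.
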
def convert_aste_text(aste_file: str) -> pd.DataFrame:
    df = read_aste_file(aste_file)
    df = df.explode("triples")
    df = df.reset_index(drop=False)
    df = df.merge(
        df.apply(aste_row_to_character_indices, axis="columns"),
        left_index=True,
        right_index=True,
    )
    df = df.drop(columns=["triples"])
    return df

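# Hedged usage sketch (hypothetical path): convert_aste_text("aste/train_triplets.txt")
# performs the same conversion but computes spans directly over the
# whitespace-tokenized ASTE text, so no SemEval file is needed.
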
def aste_row_to_character_indices(row: pd.Series) -> pd.Series:
    try:
        return pd.Series(
            aste_to_character_indices(
                triplet=row.triples,
                text=row.text,
            )
        )
    except Exception:
        print(f"failed to process row {row.name}")
        print(row)
        raise

def is_sequential(span: tuple[int, ...]) -> bool:
    return all(span[index + 1] - span[index] == 1 for index in range(len(span) - 1))

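# For example: is_sequential((6, 7, 8)) is True and is_sequential((6, 8)) is
# False; multi-word spans must cover consecutive word indices.
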
def aste_to_character_indices(
    *,
    triplet: tuple[tuple[int, ...], tuple[int, ...], str],
    text: str,
) -> CharacterIndices:
    def to_indices(span: tuple[int, ...]) -> tuple[int, int]:
        word_start = span[0]
        word_start_span = word_indices[word_start]

        word_end = span[-1]
        word_end_span = word_indices[word_end]

        return word_start_span.start_index, word_end_span.end_index - 1

    aspect_span, opinion_span, sentiment = triplet
    assert is_sequential(aspect_span), f"aspect span not sequential: {aspect_span}"
    assert is_sequential(opinion_span), f"opinion span not sequential: {opinion_span}"
    assert sentiment in {"POS", "NEG", "NEU"}, f"unknown sentiment: {sentiment}"

    word_indices = [
        WordSpan(start_index=match.start(), end_index=match.end())
        for match in word_pattern.finditer(text)
    ]

    aspect_start_index, aspect_end_index = to_indices(aspect_span)
    aspect_term = text[aspect_start_index : aspect_end_index + 1]
    opinion_start_index, opinion_end_index = to_indices(opinion_span)
    opinion_term = text[opinion_start_index : opinion_end_index + 1]

    nice_sentiment = {
        "POS": "positive",
        "NEG": "negative",
        "NEU": "neutral",
    }[sentiment]

    return {
        "aspect_start_index": aspect_start_index,
        "aspect_end_index": aspect_end_index,
        "aspect_term": aspect_term,
        "opinion_start_index": opinion_start_index,
        "opinion_end_index": opinion_end_index,
        "opinion_term": opinion_term,
        "sentiment": nice_sentiment,
    }