# Event-Extractor / app.py
import json

import streamlit as st
import torch
from annotated_text import annotated_text
from transformers import (
    AutoModelForQuestionAnswering,
    AutoModelForTokenClassification,
    AutoTokenizer,
    Trainer,
    pipeline,
)
st.set_page_config(layout="wide")
# Role questions per event type, keyed by trigger type.
with open("questions.json") as file:
    questions = json.load(file)
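# Trigger detection: a Portuguese token-classification model (ACE event types) served through an NER pipeline.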
model = AutoModelForTokenClassification.from_pretrained("models/trigger/ACE_base_google_pt")
tokenizer = AutoTokenizer.from_pretrained("models/trigger/ACE_base_google_pt", model_max_length=512)
tagger = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy='first') #aggregation_strategy='max'
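# Argument extraction: two extractive QA models. The SQuAD-1-style model is used via the
# question-answering pipeline; the SQuAD-2-style model (which can also predict "no answer")
# is run through a Trainer further below.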
qa_model2_path = "models/argument/bert-base-portuguese-squad2-ace-impossible1.4"
qa_model_path = "models/argument/bert-base-portuguese-squad-ace-pt-pt"
model_qa = pipeline("question-answering", model=qa_model_path)
model_qa2 = AutoModelForQuestionAnswering.from_pretrained(qa_model2_path)
tokenizer_qa2 = AutoTokenizer.from_pretrained(qa_model2_path)
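# Merge WordPiece subtokens (marked with "##") back into whole words, keeping each word's
# first-subtoken label.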
def aggregate_subwords(input_tokens, labels):
new_inputs = []
new_labels = []
current_word = ""
current_label = ""
for i, token in enumerate(input_tokens):
label = labels[i]
# Handle subwords
if token.startswith('##'):
current_word += token[2:]
else:
# Finish previous word
if current_word:
new_inputs.append(current_word)
new_labels.append(current_label)
# Start new word
current_word = token
current_label = label
new_inputs.append(current_word)
new_labels.append(current_label)
return new_inputs, new_labels
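# Run the trigger tagger over one sentence and collapse BIO token predictions into
# (text, tag, event-type) tuples; multi-token triggers are merged onto the previous entry
# and the [CLS]/[SEP] entries are dropped.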
def annotateTriggers(line):
line = line.strip()
inputs = tokenizer(line, return_tensors="pt")
input_tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0])
with torch.no_grad():
logits = model(**inputs).logits
predictions = torch.argmax(logits, dim=2)
predicted_token_class = [model.config.id2label[t.item()] for t in predictions[0]]
input_tokens, predicted_token_class = aggregate_subwords(input_tokens,predicted_token_class)
token_labels = []
current_entity = ''
for i, label in enumerate(predicted_token_class):
token = input_tokens[i]
if label == 'O':
token_labels.append((token, 'O', ''))
current_entity = ''
elif label.startswith('B-'):
current_entity = label[2:]
token_labels.append((token, 'B', current_entity))
elif label.startswith('I-'):
if current_entity == '':
raise ValueError(f"Invalid label sequence: {predicted_token_class}")
token_labels[-1] = (token_labels[-1][0] + f" {token}", 'I', current_entity)
else:
raise ValueError(f"Invalid label: {label}")
return token_labels[1:-1]
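# Hyperparameters for the SQuAD-2-style QA model: feature length, document stride, and null-answer handling.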
n_best_size = 20
max_answer_length = 30
max_length = 384 # The maximum length of a feature (question and context)
doc_stride = 128 # The allowed overlap between two parts of the context when splitting is needed.
squad_v2 = True
pad_on_right = tokenizer.padding_side == "right"
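# Build the tokenized features for extractive QA: (question, context) pairs are tokenized with
# overflow, so a long context may be split into several overlapping features that all map back
# to the same example.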
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make
    # truncation of the context fail (the tokenized question would take a lot of space), so we strip
    # that left whitespace.
examples["question"] = [q.lstrip() for q in examples["question"]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride.
    # This results in one example possibly giving several features when a context is long, each of
    # those features having a context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples["question" if pad_on_right else "context"],
examples["context" if pad_on_right else "question"],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_length,
stride=doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length",
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# We keep the example_id that gave us this feature and we will store the offset mappings.
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
from tqdm.auto import tqdm
import numpy as np
import collections
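# Turn raw span scores into probabilities: softmax over all candidate answers plus the
# null-answer score, so answer scores are directly comparable to the "no answer" option.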
def softmax(x):
return(np.exp(x)/np.exp(x).sum())
def normalizeScores(answers,min_null_score):
scores = [a["score"] for a in answers] + [min_null_score]
scores = softmax(scores)
for i in range(len(answers)):
answers[i]["score"] = scores[i]
return answers, scores[-1]
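# Convert raw start/end logits back into text answers, following the standard extractive-QA
# post-processing: map features back to their examples, score candidate spans, track the null
# ("no answer") score, and pick the best span or "No answer" per example.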
def postprocess_qa_predictions(examples, features, raw_predictions, n_best_size = 20, max_answer_length = 30):
all_start_logits, all_end_logits = raw_predictions
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
predictions = collections.OrderedDict()
# Logging.
print(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_score = None # Only used if squad_v2 is True.
valid_answers = []
context = example["context"]
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
            # This is what will allow us to map positions in our logits to spans of text in the
            # original context.
offset_mapping = features[feature_index]["offset_mapping"]
# Update minimum null prediction.
cls_index = features[feature_index]["input_ids"].index(tokenizer.cls_token_id)
feature_null_score = start_logits[cls_index] + end_logits[cls_index]
if min_null_score is None or min_null_score < feature_null_score:
min_null_score = feature_null_score
            # Go through all possibilities for the `n_best_size` greatest start and end logits.
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
start_char = offset_mapping[start_index][0]
end_char = offset_mapping[end_index][1]
valid_answers.append(
{
"score": start_logits[start_index] + end_logits[end_index],
"text": context[start_char: end_char]
}
)
if len(valid_answers) > 0:
valid_answers, min_null_score = normalizeScores(valid_answers,min_null_score)
#print(valid_answers)
best_answer = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[0]
else:
            # In the very rare edge case where we don't have a single non-null prediction, we create a
            # fake prediction to avoid failure.
best_answer = {"text": "", "score": 0.0}
# Let's pick our final answer: the best one or the null answer (only for squad_v2)
if not squad_v2:
predictions[example["id"]] = best_answer["text"]
else:
answer = best_answer if best_answer["score"] > min_null_score else {"text":"No answer","score":min_null_score}
predictions[example["id"]] = answer
return predictions
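# Answer one question about one context with the SQuAD-2-style model: wrap the pair in a
# one-example dataset, run Trainer.predict to get start/end logits, and post-process them into
# a text answer (or "No answer") with a normalized score.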
def qa(line,question):
d = {}
d["question"] = [question] #["Qual é o diametro do sol?"]
d["context"] = [line] #["O meu pai foi para Lisboa"]
d["id"] = ["my precious"]
    from datasets import Dataset

    dataset_validation = Dataset.from_dict(d)
    validation_features = prepare_validation_features(d)
    validation_features = Dataset.from_dict(validation_features)
trainer = Trainer(
model=model_qa2,
tokenizer=tokenizer_qa2,
)
raw_predictions = trainer.predict(validation_features)
validation_features.set_format(type=validation_features.format["type"], columns=list(validation_features.features.keys()))
final_predictions = postprocess_qa_predictions(dataset_validation, validation_features, raw_predictions.predictions)
formatted_predictions = [{"id": k, "prediction_text": v["text"], "no_answer_probability": 0, "score":v["score"]} for k, v in final_predictions.items()]
r = {"context":line,"question":question,"answer":formatted_predictions[0]["prediction_text"],"score": formatted_predictions[0]["score"]}
return r
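# Merge raw B-/I- NER pieces into single entities (joined words, max score). Kept for reference;
# the pipeline above already aggregates subwords, so this helper is only referenced in a
# commented-out line below.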
def joinEntities(entities):
joined_entities = []
i = 0
while i < len(entities):
curr_entity = entities[i]
if curr_entity['entity'][0] == 'B':
label = curr_entity['entity'][2:]
j = i + 1
while j < len(entities) and entities[j]['entity'][0] == 'I':
j += 1
joined_entity = {
'entity': label,
'score': max(e['score'] for e in entities[i:j]),
'index': min(e['index'] for e in entities[i:j]),
'word': ' '.join(e['word'] for e in entities[i:j]),
'start': entities[i]['start'],
'end': entities[j-1]['end']
}
joined_entities.append(joined_entity)
i = j - 1
i += 1
return joined_entities
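# Sentence segmentation with pysbd; note the segmenter is configured with Spanish ("es") rules
# even though the demo text is Portuguese.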
import pysbd
seg = pysbd.Segmenter(language="es", clean=False)
def sent_tokenize(text):
return seg.segment(text)
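# Locate the sentence that contains a character offset (the trigger's start position) and build
# a context of up to `window` sentences on either side of it.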
def getSentenceIndex(lines,span):
i = 1
sum = len(lines[0])
while sum < span:
sum += len(lines[i])
i = i + 1
return i - 1
def generateContext(text, window,span):
lines = sent_tokenize(text)
index = getSentenceIndex(lines,span)
text = " ".join(lines[max(0,index-window):index+window +1])
return text
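# Full event extraction for one text: detect triggers, build a sentence-window context around
# each trigger, then ask the type-specific role questions (from questions.json) with either
# QA model 1 (squad == 1) or QA model 2.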
def annotateEvents(text,squad,window):
text = text.strip()
ner_results = tagger(text)
#print(ner_results)
#ner_results = joinEntities(ner_results)
i = 0
#exit()
while i < len(ner_results):
ner_results[i]["entity"] = ner_results[i]["entity_group"].lstrip("B-")
ner_results[i]["entity"] = ner_results[i]["entity_group"].lstrip("I-")
i = i + 1
events = []
for trigger in ner_results:
tipo = trigger["entity_group"]
qs = questions[tipo]
context = generateContext(text,window,trigger["start"])
event = {
"trigger":trigger["word"],
"type": tipo,
"score": trigger["score"],
"context": context,
"arguments":[]
}
print("=========", tipo, "===========")
for role, question in qs.items():
question = question.rstrip("?") + " em " + trigger["word"]+"?"
if squad == 1:
result = model_qa(question=question, context=context)
else:
                result = qa(context, question)
print(f"Question: {question}, answer: '{result['answer']}', score: {round(result['score'], 4)}")
argument = {
"role": role,
"text": result['answer'],
"score": result['score'],
}
event["arguments"].append(argument)
events.append(event)
return events
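# Example usage outside the Streamlit UI (a sketch, assuming the local model folders above load):
#   events = annotateEvents("A Joana foi atacada pelo João nas ruas do Porto, com uma faca.", 1, 1)
# The result is a list of events, each with a trigger, type, score, context and an argument list.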
#"A Joana foi atacada pelo João nas ruas do Porto, com uma faca."
st.title('Extract Events')
options = ["A Joana foi atacada pelo João nas ruas do Porto, com uma faca.","O Estado-Maior ucraniano informou também que a Rússia lançou 19 ataques aéreos e 37 ataques com sistemas de foguete de lançamento múltiplo no último dia, danificando cidades e aldeias e ferindo um número indeterminado de civis.","Pelos menos oito civis morreram numa série de ataques russos várias regiões da Ucrânia.","O comandante das Forças Armadas da Ucrânia declarou que a Rússia disparou 36 mísseis de cruzeiro contra o país na manhã de quinta-feira (16/02), um dia depois que seis balões foram vistos sobre a capital ucraniana, Kiev.","Disparados por terra e mar, os mísseis mataram uma mulher e atingiram infraestrutura crítica, segundo autoridades, que afirmaram também ter observado uma mudança na estratégia de guerra russa, em uma aparente referência aos balões avistados em Kiev.", "O João morreu na guerra do Iraque.", 'A maria foi eleita presidente da republica em Portugal.']
option = st.selectbox(
'Select examples',
options)
#option = options [index]
line = st.text_area("Insert Text",option)
st.button('Run')
st.sidebar.write("## Hyperparameters :gear:")
window = st.sidebar.slider('Context Window', 0, 10, 1)
if line != "":
st.header("Triggers:")
    triggers = annotateTriggers(line)
    annotated_text(*[word[0] + " " if word[1] == 'O' else (word[0] + " ", word[2]) for word in triggers])
eventos_1 = annotateEvents(line,1,window)
eventos_2 = annotateEvents(line,2,window)
st.header("Arguments:")
c1, c2 = st.columns(2)
with c1:
st.subheader("bert-squad1-ace:")
with c2:
st.subheader("bert-squad2-ace:")
for mention1, mention2 in zip(eventos_1,eventos_2):
st.text("Trigger: " + mention1["trigger"] +" "+ mention1["type"] +" " +str(round(mention1["score"],3)))
st.text("Context: "+ mention1["context"])
col1, col2 = st.columns(2)
with col1:
for d in mention1["arguments"]:
text = d['text']
role = d['role']
#score = d['score']
annotated_text((text, role), " ", str(round(d['score'],3)))
st.write('\n')
with col2:
for d in mention2["arguments"]:
text = d['text']
role = d['role']
#score = d['score']
annotated_text((text, role), " ", str(round(d['score'],3)))
st.write('\n')
st.markdown("""---""")