import os
from transformers import file_utils
print(file_utils.default_cache_path)
import pandas as pd
from tqdm import tqdm
from gliner import GLiNER
import logging
import time
from transformers import pipeline, AutoTokenizer
from transformers.pipelines.pt_utils import KeyDataset
from concurrent.futures import ThreadPoolExecutor, as_completed
##os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
#os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
import torch
#torch.cuda.empty_cache() # Clear the torch CUDA cache
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device: {device}...")
if device.type == "cuda":
print("GPU number:", torch.cuda.current_device())
import datasets
import argparse
import json
import random
import numpy as np
import requests
from langchain.text_splitter import TokenTextSplitter
from virtuosoQueryRest import sparqlQuery
import gradio as gr
import re
from common import strtobool, split_camel_case, chunk_tokens, update_nested_dict, cleanInputText, token_counter, encoding_getter, extract_words, all_words_in_list, row_to_dict_string, rescale_exponential_to_logarithmic
examples = [
["He said the disease was 1st detected to have spread to Malaysia from Africa in 1997. " , None],
["He said the disease which was 1st detected in Johor had spread to Negeri Sembilan, Melaka, Perak, Selangor and the latest Kedah. He said the disease was 1st detected to have spread to Malaysia from Africa in 1997. ", None],
["The Health Ministry has detected about 4000 suspected chikungunya cases nationwide this year [2008], Minister Datuk Liow Tiong Lai said Wednesday [17 Dec 2008]. ", None],
["The Health Ministry has detected about 4000 suspected chikungunya cases nationwide this year [2008], Minister Datuk Liow Tiong Lai said Wednesday [17 Dec 2008]. He said the disease which was 1st detected in Johor had spread to Negeri Sembilan, Melaka, Perak, Selangor and the latest Kedah. \"So far, the chikungunya disease is still under control nationwide,\" he told reporters after visiting Sultanah Nur Zahirah Hospital here. Present was Terengganu Health Director Dr. Nordiyanah Hassan. Liow said that so far, there is no specific medicine to treat the chikungunya fever disease spread by _Aedes_ mosquito. \"So, I would like to call on the public to be careful particularly during the wet season now because _Aedes_ mosquito is easy to breed,\" he said. To contain the spread of the disease, he said, the ministry had taken several measures including intensifying the campaign to rid of _Aedes_ mosquito and holding lectures on the outbreak. He said the disease was 1st detected to have spread to Malaysia from Africa in 1997. Meanwhile, he said 63 health projects costing RM458 million [USD 131 230 211] had been approved for implementation in Terengganu under the Ninth Malaysia Plan and some had started.", None],
["Carcinoma", None],
["The doctor diagnosed the patient with basal cell carcinoma, a common type of skin cancer.", None],
["West Nile virus", None],
["Legionellosis", None],
["Eight years ago I started with Fosamax for 3-4 years and then took Actonel. In March, I decided not to take Actonel any longer. I had been on it for too long and was fearful of esophageal cancer and bone breakage. Now my doctor wants me to take the Prolia injections, which I am not going to do. I am not going to continue with any drugs. My bone density recently done was in the minuses. I do work with a personal trainer and execise daily. I am searching for alternative ways to deal with this problem.", None],
["Does Chicago have any stores and does Joe live here?", None],
["Cholera has been reported every week since November 1994. By 5 November 1995 at total of 12,344 with 245 deaths have been notified. Of these, 879 cases with 4 deaths were reported for the period 9 October to 5 November 1995. Control efforts have not succeeded in preventing the spread of the epidemic and when cases were detected on Sao Nicolau and Sal Islands in the period 9 October to 5 November all nine inhabited islands of Cap Verde had become infected. The last cholera epidemic in Cap Verde occurred in 1979. (See also Weekly Epidemiological Record No. 44, 3 November 1995) Côte d'Ivoire: A cholera outbreak which started in September 1995 caused 2,027 cases and 150 deaths up to 12 November 1995. The first cases were reported in Department de l'Ouest on 18 September 1995. Cases were subsequently reported in Department de Nord and most recently in Department du Centre and Department de Sud. The WHO Representative assisted in the organization of a team to visit the area and evaluate the situation as well as arranging for medical supplies. (1.12.95) Iran, Islamic Republic of,: Kordestan Province has been declared free of cholera. (1.12.95) Iraq: An outbreak of cholera reported from Sulaimaniyah Governorate in Northern Iraq has resulted in 519 cases, 264 of which have been confirmed, and 3 deaths to date. Vibrio cholerae O1 serotype Ogawa has been isolated. At the request of the Iraqi Ministry of Health, a WHO consultant has been sent to the area to assess and monitor the situation, provide guidance to the health authorities, and coordinate inputs by non-governmental organizations. WHO has also made available essential treatment supplies. An intensive media campaign to raise public awareness about essential preventive measures has been successful in containing the spread of the outbreak. (1.12.95) Senegal: Despite the fact that cholera has been endemic in countries bordering Senegal for the past two years, no cases were reported from Senegal until mid- August 1995. Between 15 August and 17 November 1995, 852 case and 43 deaths were notified. A further 731 cases with 37 deaths have been reported for the period 1 September to 12 November. Most cases were in the Departments of Dakar and Pikine in the Dakar Region and recently also Departments of Mbacke and Touba in Diourbel Region. ", None],
]
#models_List = ["FacebookAI/xlm-roberta-large-finetuned-conll03-english", "Babelscape/wikineural-multilingual-ner", "blaze999/Medical-NER", "urchade/gliner_large-v2.1", "urchade/gliner_large_bio-v0.1", "NCBO/BioPortal" ] # "urchade/gliner_large-v2.1", "knowledgator/gliner-multitask-large-v0.5"
models_List = ["Babelscape/wikineural-multilingual-ner", "urchade/gliner_large-v2.1", "NCBO/BioPortal" ] # "urchade/gliner_large-v2.1", "knowledgator/gliner-multitask-large-v0.5"
#models_List = ["NCBO/BioPortal" ]
categories_List = ["MED","LOC","PER","ORG","DATE","MISC"]
modelGliner=None
modelGlinerBio=None
num_cores_Gliner_forDemo = 0 # 0 means use the GPU for Gliner !
tokenizerGliner = AutoTokenizer.from_pretrained('microsoft/deberta-v3-large')
encod = encoding_getter('microsoft/deberta-v3-large')
text_splitter = TokenTextSplitter(
# separators=separators,
encoding_name=encod.name,
chunk_size=80000,
chunk_overlap=50,
length_function=len,
add_start_index=True,
)
pipe_dict = {}
for modelName in models_List:
tsk = "token-classification"
if (("/gliner" in modelName) == False) and (("NCBO" in modelName) == False):
pipe = pipeline(
tsk,
model=modelName,
aggregation_strategy="simple",
device=device,
)
pipe_dict[modelName] = pipe
elif ("/gliner" in modelName):
if tokenizerGliner is None:
tokenizerGliner = AutoTokenizer.from_pretrained('microsoft/deberta-v3-large')
if "_bio-" in modelName:
if num_cores_Gliner_forDemo > 0:
modelGlinerBio = GLiNER.from_pretrained(modelName) # "urchade/gliner_large_bio-v0.1")
else:
modelGlinerBio = GLiNER.from_pretrained(modelName, map_location=device)
else:
if num_cores_Gliner_forDemo > 0:
modelGliner = GLiNER.from_pretrained(
modelName) # "knowledgator/gliner-multitask-large-v0.5" - "urchade/gliner_large-v2.1"
else:
modelGliner = GLiNER.from_pretrained(modelName, map_location=device)
def process_row_Gliner(args, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels, row):
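"""Annotate a single DataFrame row with GLiNER.
The text in row[args.source_column] is tokenised, split into chunks that fit the
model's max_len, and each chunk is passed to the bio or general GLiNER model
(selected via args.model_id) with the given labels and score threshold.
Entity character offsets are shifted by the running chunk offset and keys are
renamed ('label' -> 'entity_group', 'text' -> 'word') to match the HF pipeline output.
Returns (row.name, list_of_entities).
"""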
context_to_annotate = row[args.source_column]
tokens = tokenizerGliner.tokenize(context_to_annotate)
entities = []
offset = 0
if "gliner_large_bio" in args.model_id:
max_chunk_length = modelGlinerBio.config.max_len
else:
max_chunk_length = modelGliner.config.max_len
for chunk in chunk_tokens(tokens, (max_chunk_length - 1)):
chunk_text = tokenizerGliner.convert_tokens_to_string(chunk)
if "gliner_large_bio" in args.model_id:
chunk_entities = modelGlinerBio.predict_entities(chunk_text, glinerlabels,
threshold=args.entities_filter_threshold)
else:
chunk_entities = modelGliner.predict_entities(chunk_text, glinerlabels,
threshold=args.entities_filter_threshold)
adjusted_entities = []
for entity in chunk_entities:
adjusted_entity = {
'text': entity['text'],
'score': entity['score'],
'start': entity['start'] + offset,
'end': entity['end'] + offset,
'label': entity['label']
}
adjusted_entities.append(adjusted_entity)
entities.extend(adjusted_entities)
offset += len(chunk_text)
if entities and isinstance(entities, list):
for d in entities:
d['entity_group'] = d.pop('label')
d['word'] = d.pop('text')
d['entity_group'] = d['entity_group'].upper()
return row.name, entities
def parallel_process_df_Gliner(args, df, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels):
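"""Run process_row_Gliner over every row of df and store the result in df['annotation'].
With args.num_cores_Gliner > 0 the rows are processed in a ThreadPoolExecutor;
otherwise (GPU mode) they are processed sequentially with df.apply. Returns the updated df.
"""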
results = []
if args.num_cores_Gliner > 0:
with ThreadPoolExecutor(max_workers=args.num_cores_Gliner) as executor:
futures = [
executor.submit(
process_row_Gliner, args, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels, row
)
for _, row in df.iterrows()
]
for future in tqdm(futures):
drm_idx, entities = future.result()
df.at[drm_idx, 'annotation'] = entities
else:
# Apply the function to each row and extract only the entities part
df['annotation'] = df.apply(
lambda row: process_row_Gliner(args, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels, row)[1],
axis=1
)
return df
def process_row_BioPortal_api(args, key_bioportal, row):
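"""Call the NCBO BioPortal AnnotatorPlus API for one row (or plain string) of text.
The ontology list is restricted to args.KG_restriction when provided (with a few
acronym remappings), otherwise a fixed list of biomedical ontologies is used.
The JSON response is flattened into a DataFrame ('annotatedClass' and 'annotations'
expanded), SNOMEDCT base URIs are rewritten to http://snomed.info/id/, and the
expanded DataFrame is returned (an empty DataFrame on error).
"""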
#context_to_annotate = row[args.source_column]
if isinstance(row, list) or isinstance(row, pd.Series):
context_to_annotate = row[args.source_column]
elif isinstance(row, str):
context_to_annotate = row
else:
raise ValueError("Unsupported type for row. Expected list or string.")
url=""
if getattr(args, 'KG_restriction', None):
# api call
if strtobool(args.debug):
print("--- BIOPORTAL: " + context_to_annotate)
# args.KG_restriction exists and is not empty
if strtobool(args.debug):
print("KG_restriction is provided and not empty:", args.KG_restriction)
onto_clauses = ""
for choice in args.KG_restriction:
if choice.upper() == "SNOMEDCT":
choice="SNOMED"
elif choice.upper() == "OBOREL":
choice = "RO"
elif choice.upper() == "PTRANS":
choice = "TRANS"
elif choice.upper() == "FOODON":
choice = "FoodOn"
elif choice.upper() == "GEOSPARQL":
choice = "GeoSPARQL"
elif choice.upper() == "NCBITAXON":
choice = "NCBITAXON,NCBITaxon_"
onto_clauses=onto_clauses+choice+","
if onto_clauses and onto_clauses[-1] == ",":
onto_clauses=onto_clauses[:-1]
url = f"https://services.data.bioontology.org/annotatorplus/?text={context_to_annotate}&ontologies={onto_clauses}&longest_only=true&exclude_numbers=true&whole_word_only=true&exclude_synonyms=false&negation=false&experiencer=false&temporality=false&score_threshold=0&confidence_threshold=0&display_links=false&display_context=false&score=cvalue&apikey={key_bioportal}"
else:
# args.KG_restriction does not exist or is empty
if strtobool(args.debug):
print("--- BIOPORTAL: " + context_to_annotate)
print("KG_restriction is not provided or empty - Consider all the KGs")
url = f"https://services.data.bioontology.org/annotatorplus/?text={context_to_annotate}&ontologies=AEO,BFO,BIM,BCGO,CL,CHIRO,CHEBI,DCM,FMA,GO,GENO,GEOSPARQL,HL7,DOID,HP,HP_O,IDO,IAO,ICD10,LOINC,MESH,MONDO,NCIT,NCBITAXON,NIFCELL,NIFSTD,GML,OBCS,OCHV,OHPI,OPB,PTRANS,PLOSTHES,RADLEX,OBOREL,STY,SO,SNOMEDCT,STATO,SYMP,FOODON,UBERON,VO&longest_only=true&exclude_numbers=true&whole_word_only=true&exclude_synonyms=false&negation=false&experiencer=false&temporality=false&score_threshold=0&confidence_threshold=0&display_links=false&display_context=false&score=cvalue&apikey={key_bioportal}"
response = requests.get(url)
try:
data = response.json()
dff = pd.DataFrame(data)
dff = dff.drop(columns=['hierarchy', 'mappings'])
# If the columns are dictionary-like, use pd.json_normalize:
expanded_annotated_class = pd.json_normalize(dff['annotatedClass'])
expanded_annotations = pd.DataFrame(dff['annotations'].tolist(), index=dff.index)
expanded_annotations = pd.json_normalize(expanded_annotations[0])
# Join the expanded columns back to the original DataFrame
df_expanded = dff.drop(columns=['annotatedClass', 'annotations']).join(expanded_annotated_class).join(
expanded_annotations)
# Replace the SNOMEDCT base URI because our internal knowledge base uses http://snomed.info/id/
df_expanded['@id'] = df_expanded['@id'].str.replace(
"http://purl.bioontology.org/ontology/SNOMEDCT/",
"http://snomed.info/id/"
)
return df_expanded
except Exception as err:
logging.error(
f'ERROR ON BioPortal Annotator API Call\n\tError: {err}\n TextToAnnotate: {context_to_annotate}\n Have a check...')
return pd.DataFrame() # empty dataframe
def annotate(df, args, pipeInner, tokenizerGliner, modelGliner, modelGlinerBio, device="cpu"):
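"""Annotate every row of df with the model selected by args.model_id.
GLiNER models go through parallel_process_df_Gliner; the NCBO/BioPortal annotator is
called row by row, keeping the best-scoring concept per character span (plus all
candidate URIs in 'ALLURIScontextFromNCBO'), dropping spans with score <= 3.0 and
rescaling the remaining scores into [0.7, 1.0]; any other model is run as a HF
token-classification pipeline over a datasets.Dataset. Returns a DataFrame with one
row per annotation.
"""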
if strtobool(args.debug):
print("\nAnnotate using " + args.model_id)
print("device=" + str(device))
startAnnotate = time.time()
if "gliner" in args.model_id:
df['model'] = args.model_id
df['annotation'] = None
glinerlabels = ["location", "disease", "date", "numerical value", "number"]
# Parallel CPU computation for Gliner:
df = parallel_process_df_Gliner(args, df, tokenizerGliner, modelGlinerBio, modelGliner, glinerlabels)
df_annot = df.explode('annotation').dropna(subset=['annotation']).reset_index(drop=True)
elif "NCBO" in args.model_id: #NCBO/BioPortal" annotator
#https://data.bioontology.org/documentation#nav_annotator
#https://bioportal.bioontology.org/annotatorplus
#key_bioportal = ""
#if args.bioportalkey_filename:
# fkeyname = args.bioportalkey_filename
# with open(fkeyname) as f:
# key_bioportal = f.read()
key_bioportal = os.environ['key_bioportal']
df_annot = pd.DataFrame()
for drm_idx, row in tqdm(df.iterrows()):
df_BioPortalAnnotation=process_row_BioPortal_api(args, key_bioportal, row)
if not df_BioPortalAnnotation.empty:
df_BioPortalAnnotation = df_BioPortalAnnotation.sort_values(
by=['from', 'text', 'score', 'matchType'], ascending=[True, True, False, False])
df_biop_minimised = df_BioPortalAnnotation.copy()
# Group the dataframe by "from" and "to" columns
grouped_biop = df_biop_minimised.groupby(['from', 'to'])
# Get the index of the row with the maximum score in each group
idx_biop = grouped_biop['score'].idxmax()
# Filter the dataframe using these indices
df_max_score_biop = df_biop_minimised.loc[idx_biop]
# Optional: Reset index if you want a clean index
df_max_score_biop = df_max_score_biop.reset_index(drop=True)
# Create the "pippo" column by grouping and collecting "@id" values
pippo_lists = grouped_biop['@id'].apply(list).reset_index()
# Merge the lists back into the df_max_score_biop by 'from' and 'to'
df_max_score_biop = df_max_score_biop.merge(pippo_lists, on=['from', 'to'])
# Rename the merged column to "pippo"
df_max_score_biop = df_max_score_biop.rename(columns={'@id_x': '@id'})
df_max_score_biop = df_max_score_biop.rename(columns={'@id_y': 'ALLURIScontextFromNCBO'})
# The df_max_score_biop now contains the "pippo" column with lists of "@id" values.
# Filter the dataframe to keep only rows where the score is greater than 3.0
df_max_score_biop = df_max_score_biop[df_max_score_biop['score'] > 3.0]
# Specify the columns you want to keep
columns_to_keep = ["score", "from", "to", "prefLabel", "text", "semantic_groups", "@id", "ALLURIScontextFromNCBO"]
# Subset the dataframe to keep only the specified columns
df_max_score_biop = df_max_score_biop[columns_to_keep]
# Rename the specified columns
df_max_score_biop = df_max_score_biop.rename(columns={"from": "start", "to": "end", "text": "word", "semantic_groups": "entity_group"})
# Optional: Reset index if you want a clean index after filtering
df_max_score_biop = df_max_score_biop.reset_index(drop=True)
df_max_score_biop['score'] = df_max_score_biop['score'].round(2)
# Each row of "semantic_groups" --> 'entity_group' is a list of string ... keep only the first part if not NaN
# df_max_score_biop['entity_group'] = df_max_score_biop['entity_group'].apply(lambda x: x[0] if pd.notna(x).all() else x)
df_max_score_biop['entity_group'] = df_max_score_biop['entity_group'].apply(lambda x: x[0] if isinstance(x, list) and len(x) > 0 else (np.nan if x is None or (isinstance(x, float) and pd.isna(x)) else x))
#df_max_score_biop = df_max_score_biop.dropna(subset=['entity_group'])
all_empty_or_nan_or_empty_string = df_max_score_biop['entity_group'].replace('', pd.NA).isna().all()
if not all_empty_or_nan_or_empty_string:
#print("Is the 'entity_group' column completely NaN, None, or empty strings?", all_empty_or_nan_or_empty_string)
# Identify the minimum score
min_score_biop = df_max_score_biop['score'].min()
# Apply filter to find rows where entity_group is None and score is the minimum one
conditionBiop = (df_max_score_biop['entity_group'].isna()) & (df_max_score_biop['score'] == min_score_biop)
df_max_score_biop = df_max_score_biop[~conditionBiop]
# Replace all NaN values in 'entity_group' with 'BIOP'
df_max_score_biop['entity_group'] = df_max_score_biop['entity_group'].fillna('BIOP')
# To delete the columns "start" and "end"
#df_max_score_biop = df_max_score_biop.drop(columns=['start', 'end', 'word', 'entity_group', 'score', 'prefLabel'])
if not df_max_score_biop.empty:
row_df = pd.DataFrame([row] * len(df_max_score_biop), columns=row.index)
row_df['model'] = args.model_id #'NCBO/BioPortal'
df_max_score_biop = pd.concat([row_df.reset_index(drop=True), df_max_score_biop.reset_index(drop=True)],
axis=1)
df_annot = pd.concat([df_annot, df_max_score_biop], ignore_index=True)
#here I finish the for cycle
if not df_annot.empty:
mmax_score = df_annot['score'].max()
mmin_score = df_annot['score'].min()
if mmax_score == mmin_score:
df_annot['score'] = 0.3
df_annot.loc[df_annot['score'].notnull(), 'score'] = 0.7
else:
# Apply the transformation function
#df_annot = rescale_exponential_to_linear(df_annot, 'score', new_min=0.5, new_max=1.0)
df_annot = rescale_exponential_to_logarithmic(df_annot, 'score', new_min=0.7, new_max=1.0)
columnsDict = ['start', 'end', 'word', 'entity_group', 'score', 'prefLabel']
df_annot['annotation'] = df_annot.apply(row_to_dict_string, axis=1, columnsDict=columnsDict)
# Convert JSON string to a dictionary, if needed
df_annot['annotation'] = df_annot['annotation'].apply(
lambda x: json.loads(x) if isinstance(x, str) else x
)
df_annot = df_annot.drop(columns=columnsDict)
else:
HF_dataset = datasets.Dataset.from_pandas(pd.DataFrame(data=df))
# tsk = "token-classification"
#
# pipe = pipeline(
# tsk,
# model=args.model_id,
# aggregation_strategy="simple",
# device=device,
# )
if strtobool(args.debug):
print('Annotating texts...')
annotated_texts = [
# out for out in tqdm(pipe(KeyDataset(HF_dataset, args.source_column), batch_size=args.batch_size))
# out for out in tqdm(ner_pipeline(KeyDataset(HF_dataset, args.source_column), batch_size=args.batch_size))
out for out in tqdm(pipeInner(KeyDataset(HF_dataset, args.source_column), batch_size=args.batch_size))
]
if strtobool(args.debug):
print('looping annotations...')
df['model'] = args.model_id
df['annotation'] = annotated_texts
df_annot = df.explode('annotation').dropna(subset=['annotation']).reset_index(drop=True)
if strtobool(args.debug):
endAnnotate = time.time()
hours, rem = divmod(endAnnotate - startAnnotate, 3600)
minutes, seconds = divmod(rem, 60)
print("...end annotation - Time... {:0>2}:{:0>2}:{:05.2f}\n".format(int(hours), int(minutes), seconds))
print('\n')
return df_annot
def default_serializer(obj):
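"""JSON serializer hook that converts numpy floats to plain Python floats."""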
if isinstance(obj, np.floating):
return float(obj)
raise TypeError(f"Object of type {obj.__class__.__name__} is not JSON serializable")
def is_json(myjson):
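"""Return True if the object can be serialised to JSON (using default_serializer for numpy floats)."""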
try:
# json.loads(myjson, default=default_serializer)
json.dumps(myjson, default=default_serializer)
except (TypeError, ValueError):
return False
return True
def is_cross_inside(df_sorted, args, valuecutCross=0.75):
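"""Flag and filter overlapping annotation spans.
df_sorted must be ordered by 'SentenceRef' and 'start'. For each group of mutually
overlapping spans within a sentence, the span that carries a 'ToLink' value (or,
failing that, the highest-scoring one) is kept and all the others get IsCrossInside = 1.
Flagged rows are then dropped unless their score is at least valuecutCross.
Returns the filtered DataFrame.
"""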
df_sorted['IsCrossInside'] = 0
df_sorted = df_sorted.reset_index(drop=True) # I reset the index so that I can loop on inner loop inside
# NOTE: a fully vectorised pairwise overlap check (N x N masks over SentenceRef/start/end/score)
# was tried but required an N x N boolean matrix and blew up memory
# (numpy refused to allocate ~73 TiB for ~9M annotations); a naive per-row mask loop
# was far too slow. The incremental scan below exploits the fact that df_sorted is
# already ordered by 'SentenceRef' and 'start'.
# Iterate over the DataFrame with an outer loop - I know that df_sorted is ordered by 'SentenceRef' and 'start'
indexes_list = []
scores_list = []
IsToLinkContained = []
for i, row_outer in tqdm(df_sorted.iterrows()):
#print(row_outer)
# if (i>0) and (df_sorted.iloc[i-1]['SentenceRef'] == row_outer['SentenceRef']): # if the previous row is of the same SenteceRef, it has been alredy computed and I skip
# continue
if len(indexes_list)==0:
scores_list.append(row_outer['score'])
indexes_list.append(i)
if pd.notnull(row_outer['ToLink']) and len(row_outer['ToLink']) > 0:
IsToLinkContained.append(True)
else:
IsToLinkContained.append(False)
# if (pd.isnull(row_outer['ToLink']) or row_outer['ToLink'] == ''):
# IsToLinkContained.append(False)
# elif len(row_outer['ToLink'])>0:
# IsToLinkContained.append(True)
else:
if i in indexes_list:
if (i == indexes_list[-1]):
# indexes_list = []
# scores_list = []
# IsToLinkContained = []
indexes_list = [indexes_list[-1]]
scores_list = [scores_list[-1]]
IsToLinkContained = [IsToLinkContained[-1]]
else:
continue
# Inner loop only needs to consider rows starting from the current position
for j in range(i + 1, len(df_sorted)):
#for j in range(0, len(df_sorted)):
# if i==j: continue
#print(j)
row_inner = df_sorted.iloc[j]
# Break the inner loop if SentenceRef changes (due to sorting)
if row_inner['SentenceRef'] != row_outer['SentenceRef']:
break
elif row_inner['start'] >= row_outer['end']:
break
else:
scores_list.append(row_inner['score'])
indexes_list.append(j)
if pd.notnull(row_inner['ToLink']) and len(row_inner['ToLink']) > 0:
IsToLinkContained.append(True)
else:
IsToLinkContained.append(False)
if len(indexes_list)>1:
first_true_index = -1
try:
first_true_index = IsToLinkContained.index(True)
#print(f"The first index with value True is: {first_true_index}")
except ValueError:
first_true_index = -1
#print("No True value found in the list")
topinlist=-1
if first_true_index >=0:
topinlist = first_true_index
else:
topinlist = scores_list.index(max(scores_list))
#print(f"The position with the maximum score is: {topinlist}")
if topinlist >= 0:
for xx in range(0, len(indexes_list)):
if xx == topinlist:
continue
df_sorted.at[indexes_list[xx], 'IsCrossInside'] = 1
else:
indexes_list = []
scores_list = []
IsToLinkContained = []
# Now drop annotations whose span is strictly contained inside another one (e.g. "osis" inside "Legionellosis"), unless their score is high enough
if not df_sorted.empty:
# punctuation_and_space = set(string.punctuation + ' ')
# condition = df_sorted.apply(lambda row:
# (row['IsCrossInside'] == 0) or
# ((row['IsCrossInside'] == 1) and
# ((row[args.source_column][row['start'] - 1] in punctuation_and_space) if row[
# 'start'] - 1 >= 0 else True) and
# ((row[args.source_column][row['end']] in punctuation_and_space) if row['end'] <= len(
# row[args.source_column]) else True)),
# axis=1)
condition = df_sorted.apply(lambda row:
(row['IsCrossInside'] == 0) or
( (row['IsCrossInside'] == 1) and (row['score'] >=valuecutCross )),
axis=1)
# Filter the DataFrame to keep only rows where the condition is False
df_sorted = df_sorted[condition]
return df_sorted
def entitiesFusion(df_annotated, args):
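"""Fuse and filter the annotations produced by the different models.
The 'annotation' dicts are expanded into columns, rows below
args.entities_filter_threshold are dropped, start/end offsets are harmonised across
models (NCBO, Medical-NER, GLiNER quirks), IsGeo/IsBio flags are derived from the
entity groups, NER words that are not whole words of the source text are discarded,
and duplicates are resolved keeping the highest-scoring (preferably Bio/Geo-flagged)
annotation per span. Returns the combined DataFrame sorted by sentence and offset.
"""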
if strtobool(args.debug):
print("\nStart entities fusion and filtering ...")
areJson = df_annotated["annotation"].apply(is_json)
if False in areJson.unique():
for idxr, rr in df_annotated.iterrows():
# for idxr in range(df_annotated["annotation"].shape[0]):
if areJson[idxr] == False:
print("PROBLEM WITH JSON AT INDEX " + str(idxr) + ":\n" + df_annotated["annotation"][idxr])
replacement_empty_myjson = '{\"entity_group\": \"\", \"score\": \"\", "word": \"\", \"start\": \"\", \"end\": \"\"}' # {'entity_group': 'ORG', 'score': 0.9999951, 'word': 'Health Ministry', 'start': 4, 'end': 19}
df_annotated.at[idxr, "annotation"] = replacement_empty_myjson
print(" ...... Then replacing it with empty JSON --> " + df_annotated["annotation"][idxr])
try:
# df_extract = df_annotated.apply(lambda x: pd.Series(
# json.loads(x['annotation'], default=default_serializer).values(),
# index=json.loads(x['annotation'], default=default_serializer).keys()), axis=1)
df_extract = df_annotated.apply(lambda x: pd.Series(x['annotation'].values(),
index=x['annotation'].keys()), axis=1)
# Check if '@id' column exists in df_extract
if '@id' in df_extract.columns:
# Drop the '@id' column
df_extract = df_extract.drop(columns='@id')
df_annotated = pd.merge(df_annotated, df_extract, left_index=True, right_index=True)
except Exception as err:
logging.error(
f'FAILED to extract json results\n\tError: {err}\nLeaving it as a single column then and not decompressing! Have a check...')
#delete all the rows with score smaller than entities_filter_threshold:
if args.entities_filter_threshold > 0:
df_annotated = df_annotated[df_annotated['score'] >= args.entities_filter_threshold]
if df_annotated.empty:
return df_annotated
# #delete all the rows where the concept that was asked to link, in the column "ToLink", is present and it is different from the column "word", that was recognised
# df_annotated = df_annotated[(df_annotated['ToLink'] == df_annotated['word']) | df_annotated['ToLink'].isna()]
# in all the rows having a value not null for the column "ToLink", compare this value to that of the column "word". If they are different, set the value in "ToLink" to None
df_annotated.loc[
(~df_annotated['ToLink'].isnull()) & (df_annotated['ToLink'] != df_annotated['word']), 'ToLink'] = None
# now fill all the values of the column "toLink" that are empty with the values of the row "word":
# df_annotated['ToLink'] = df_annotated['ToLink'].fillna(df_annotated['word'])
if "IsGeo" not in df_annotated.columns:
#df_annotated["IsGeo"] = None # 0
df_annotated.loc[:, "IsGeo"] = None
if "IsBio" not in df_annotated.columns:
#df_annotated["IsBio"] = None # 0
df_annotated.loc[:, "IsBio"] = None
df_annotated.loc[df_annotated['entity_group'] == 'LOCATION', 'entity_group'] = "LOC"
df_annotated.loc[df_annotated['entity_group'] == 'LOC', 'IsGeo'] = 1
#df_annotated.loc[df_annotated['entity_group'] == 'DISEASE', 'IsBio'] = 1
df_annotated.loc[df_annotated['entity_group'].str.lower().str.contains('disease', na=False), 'IsBio'] = 1
df_annotated.loc[(df_annotated['model'].str.contains('Medical-NER')) & (
df_annotated['entity_group'].isin(['LOC', 'DATE', 'PER', 'ORG', 'DOSAGE', 'LAB_VALUE', 'DURATION']) == False), 'IsBio'] = 1
df_annotated.loc[(df_annotated['model'].str.contains('NCBO')) & (
df_annotated['entity_group'].isin(['CONC']) == False), 'IsBio'] = 1
# NCBO BioPortal reports 'start' shifted by +1, so subtract 1 here so that duplicate dropping and max-score selection line up across models
df_annotated.loc[df_annotated['model'].str.lower().str.contains('ncbo'), 'start'] -= 1
# blaze999/Medical-NER reports a 'start' one position before the real one when the identified entity starts with a space, so add 1 for consistent duplicate dropping and max-score selection
df_annotated.loc[(df_annotated['model'] == 'blaze999/Medical-NER') &
df_annotated.apply(lambda row: row[args.source_column][row['start']] == ' ',
axis=1), 'start'] += 1
# GLiNER models report 'start' and 'end' one position before the real ones when the identified entity starts with a space, so add 1 to both for consistent duplicate dropping and max-score selection
df_annotated.loc[df_annotated['model'].str.lower().str.contains('gliner') &
df_annotated.apply(lambda row: row[args.source_column][row['start']] == ' ',
axis=1), 'end'] += 1
df_annotated.loc[df_annotated['model'].str.lower().str.contains('gliner') &
df_annotated.apply(lambda row: row[args.source_column][row['start']] == ' ',
axis=1), 'start'] += 1
#### Here I drop all the identified NER words which are strictly contained in other words
# Apply first the function to the "args.source_column" to create lists of words
df_annotated['extracted_words'] = df_annotated[args.source_column].apply(extract_words,putInLower=True)
# Filter the DataFrame
df_annotated = df_annotated[df_annotated.apply(lambda row: all_words_in_list(row['word'], row['extracted_words'], putInLower=True), axis=1)]
# drop the 'extracted_words' column afterwards:
df_annotated = df_annotated.drop(columns=['extracted_words'])
#####
# This operation sorts the DataFrame by the "ToLink" column in descending order, with null values at the end, and then uses the drop_duplicates method to drop all duplicate rows,
# except the first one, based on all columns except "ToLink".
# This way, it will keep the rows with the non-null value in "ToLink" if there are multiple rows with the same values in all columns except "ToLink".
df_annotated = df_annotated.sort_values(by='ToLink', ascending=False, na_position='last')
for col in df_annotated.columns:
if df_annotated[col].apply(lambda x: isinstance(x, dict)).any():
if strtobool(args.debug):
print(
f"Column '{col}' contains dictionaries...converting it to strings otherwise it will not work the concat etc..")
df_annotated[col] = df_annotated[col].apply(lambda x: str(x))
df_annotated = df_annotated.drop_duplicates(subset=[col for col in df_annotated.columns if
col != 'ToLink' and col != 'ALLURIScontextFromNCBO' and not df_annotated[col].apply(
lambda x: isinstance(x, dict)).any()], keep='first')
# df_annotated = df_annotated.loc[df_annotated.groupby(['SentenceRef', 'ToLink', args.source_column, 'end', 'start', df_annotated['word'].str.lower()])['score'].idxmax()]
# df_annotated = df_annotated.loc[df_annotated.groupby(['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower()])['score'].idxmax()]
df_annotated_Geo = df_annotated.loc[df_annotated.groupby(
['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower(), 'IsGeo'])[
'score'].idxmax()]
df_annotated_Bio = df_annotated.loc[df_annotated.groupby(
['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower(), 'IsBio'])[
'score'].idxmax()]
df_annotated_all = df_annotated.loc[
df_annotated.groupby(['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower()])[
'score'].idxmax()]
# now you can concat
df_annotated_combined = pd.concat([df_annotated_Geo, df_annotated_Bio, df_annotated_all])
df_annotated_combined = df_annotated_combined.drop_duplicates(subset=[col for col in df_annotated_combined.columns if
col != 'ToLink' and col != 'ALLURIScontextFromNCBO' and not df_annotated_combined[col].apply(
lambda x: isinstance(x, dict)).any()], keep='first')
# df_annotated_combined['IsBioGeo'] = df_annotated_combined['IsGeo'].fillna(0) + df_annotated_combined['IsBio'].fillna(0)
# df_annotated_combined['IsBioGeo'] = pd.to_numeric(df_annotated_combined['IsGeo'], errors='coerce').fillna(0) + pd.to_numeric(df_annotated_combined['IsBio'], errors='coerce').fillna(0)
# df_annotated_combined['IsBioGeo'] = df_annotated_combined['IsGeo'].infer_objects(copy=False).fillna(0) + df_annotated_combined['IsBio'].infer_objects(copy=False).fillna(0)
df_annotated_combined.loc[:, "IsBioGeo"] = df_annotated_combined.loc[:, 'IsGeo'].infer_objects(copy=False).fillna(0) + df_annotated_combined.loc[:, 'IsBio'].infer_objects(copy=False).fillna(0)
df_annotated_combined = df_annotated_combined.loc[df_annotated_combined.groupby(
['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower()])['IsBioGeo'].idxmax()]
df_annotated_combined = df_annotated_combined.loc[
df_annotated_combined.groupby(
['SentenceRef', args.source_column, 'end', 'start', df_annotated['word'].str.lower(), 'IsBioGeo'])[
'score'].idxmax()]
df_annotated_combined = df_annotated_combined.drop('IsBioGeo', axis=1)
df_annotated_combined.loc[df_annotated_combined['IsBio'] == 0, 'IsBio'] = None
df_annotated_combined.loc[df_annotated_combined['IsGeo'] == 0, 'IsGeo'] = None
df_annotated_combined = df_annotated_combined.sort_values(by=['SentenceRef', 'start', 'ToLink', 'word', 'score'], ascending=[True, True, True, True, False])
#df_annotated_combined = df_annotated_combined.reindex(range(len(df_annotated_combined)))
return df_annotated_combined
def geonames_api_call(word, args, key_geonames, cache_map_geonames):
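"""Resolve a place name to a GeoNames URI via the GeoNames search API (name_equals, first hit).
Results (including misses) are memoised in cache_map_geonames. Returns a tuple
(geonameUrl, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames),
where the context/triples slots are currently always None.
"""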
context = ""
singleContext = None
globalContext = None
singleTriples = None
globalTriples = None
if cache_map_geonames is not None:
if word in cache_map_geonames:
if context in cache_map_geonames[word]:
url_text = cache_map_geonames[word][context]
if strtobool(args.debug):
print("RETRIEVED CACHED RESULT FOR:\n", word, " => ", url_text, "\n")
return url_text, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames
# url = f"http://api.geonames.org/search?name_equals={word}&maxRows=1&featureClass=A&featureCode=ADM1&featureCode=ADM2&featureCode=ADM3&featureCode=ADM4&featureCode=ADM5&type=json&username={key_geonames}"
url = f"http://api.geonames.org/search?name_equals={word}&maxRows=1&type=json&username={key_geonames}"
response = requests.get(url)
try:
data = response.json()
if data['geonames']:
# geoname = data['geonames'][0]['name']
geonameId = data['geonames'][0]['geonameId']
geonameUrl = "https://sws.geonames.org/" + str(geonameId) + "/"
if cache_map_geonames is not None:
if not word in cache_map_geonames:
cache_map_geonames[word] = {}
cache_map_geonames[word][context] = geonameUrl
return geonameUrl, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames
else:
if cache_map_geonames is not None:
if not word in cache_map_geonames:
cache_map_geonames[word] = {}
cache_map_geonames[word][context] = None
return None, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames
except Exception as err:
# if cache_map_geonames is not None:
# if not word in cache_map_geonames:
# cache_map_geonames[word] = {}
# cache_map_geonames[word][context] = None
return None, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames
def getUrlBioAndAllOtherBioConcepts(word, args, key_virtuoso, cache_map_virtuoso, endpoint, VirtuosoUsername, contextWordVirtuoso, UseBioportalForLinking=False ):
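"""Link a word to a biomedical concept IRI and collect all candidate IRIs.
Depending on UseBioportalForLinking, the lookup is done either with a SPARQL query
against the Virtuoso endpoint (exact label match, candidates ranked by out-degree)
or with the BioPortal AnnotatorPlus API (restricted to args.KG_restriction when
provided). Results are memoised in cache_map_virtuoso. Returns
(entityBioeUrl, ALLURIScontext, cache_map_virtuoso).
"""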
#UseBioportalForLinking = False #trial to del
if strtobool(args.debug):
print("--- start getUrlBioAndAllOtherBioConcepts for " + word.lower())
entityBioeUrl = None
ALLURIScontext = []
#key_bioportal = ""
#if args.bioportalkey_filename:
# fkeyname = args.bioportalkey_filename
# with open(fkeyname) as f:
# key_bioportal = f.read()
key_bioportal = os.environ['key_bioportal']
# Check if args.KG_restriction exists and is not empty
if getattr(args, 'KG_restriction', None):
# api call
if strtobool(args.debug):
print("--- " + word.lower())
# args.KG_restriction exists and is not empty
if strtobool(args.debug):
print("KG_restriction is provided and not empty:", args.KG_restriction)
from_clauses = ' '.join([f"FROM <{choice}>" for choice in args.KG_restriction])
# Construct the full SPARQL query
query = f"""
SELECT ?concept ?label (COUNT(?edge) AS ?score)
{from_clauses}
WHERE {{
?concept skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?label .
FILTER (LCASE(STR(?label)) = "{word.lower()}")
?concept ?edge ?o .
}}
GROUP BY ?concept ?label
ORDER BY DESC(?score)
"""
### this is for Bioportal url api:
onto_clauses = ""
for choice in args.KG_restriction:
if choice.upper() == "SNOMEDCT":
choice = "SNOMED"
elif choice.upper() == "OBOREL":
choice = "RO"
elif choice.upper() == "PTRANS":
choice = "TRANS"
elif choice.upper() == "FOODON":
choice = "FoodOn"
elif choice.upper() == "GEOSPARQL":
choice = "GeoSPARQL"
elif choice.upper() == "NCBITAXON":
choice = "NCBITAXON,NCBITaxon_"
onto_clauses = onto_clauses + choice + ","
if onto_clauses and onto_clauses[-1] == ",":
onto_clauses = onto_clauses[:-1]
url = f"https://services.data.bioontology.org/annotatorplus/?text={word.lower()}&ontologies={onto_clauses}&longest_only=true&exclude_numbers=true&whole_word_only=true&exclude_synonyms=false&negation=false&experiencer=false&temporality=false&score_threshold=0&confidence_threshold=0&display_links=false&display_context=false&score=cvalue&apikey={key_bioportal}"
else:
# args.KG_restriction does not exist or is empty
if strtobool(args.debug):
print("--- " + word.lower())
print("KG_restriction is not provided or empty - Consider all the KGs in the virtuoso endpoint")
query = f"""
SELECT ?concept ?label (COUNT(?edge) AS ?score)
WHERE {{
?concept skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?label .
FILTER (LCASE(STR(?label)) = "{word.lower()}")
?concept ?edge ?o .
}}
GROUP BY ?concept ?label
ORDER BY DESC(?score)
"""
### this is for Bioportal url api:
url = f"https://services.data.bioontology.org/annotatorplus/?text={word.lower()}&ontologies=AEO,BFO,BIM,BCGO,CL,CHIRO,CHEBI,DCM,FMA,GO,GENO,GEOSPARQL,HL7,DOID,HP,HP_O,IDO,IAO,ICD10,LOINC,MESH,MONDO,NCIT,NCBITAXON,NIFCELL,NIFSTD,GML,OBCS,OCHV,OHPI,OPB,PTRANS,PLOSTHES,RADLEX,OBOREL,STY,SO,SNOMEDCT,STATO,SYMP,FOODON,UBERON,VO&longest_only=true&exclude_numbers=true&whole_word_only=true&exclude_synonyms=false&negation=false&experiencer=false&temporality=false&score_threshold=0&confidence_threshold=0&display_links=false&display_context=false&score=cvalue&apikey={key_bioportal}"
try:
if UseBioportalForLinking == False:
if strtobool(args.debug):
print("Use Virtuoso Sparql endpoint for linking ... " + word.lower())
responseText = sparqlQuery(endpoint, query, VirtuosoUsername, key_virtuoso, strtobool(args.USE_CACHE))
# Parse the response as JSON
results = json.loads(responseText)
if len(results) > 0 and results['results']['bindings']:
entityBioeUrl = str(results['results']['bindings'][0]['concept']['value'])
if cache_map_virtuoso is not None:
if not word in cache_map_virtuoso:
cache_map_virtuoso[word] = {}
cache_map_virtuoso[word][contextWordVirtuoso] = entityBioeUrl
# # loop the results
for result in results['results']['bindings']:
# print(result)
contextConcept = result['concept']['value']
if contextConcept not in ALLURIScontext:
ALLURIScontext.append(contextConcept)
if cache_map_virtuoso is not None:
if not word in cache_map_virtuoso:
cache_map_virtuoso[word] = {}
cache_map_virtuoso[word][contextConcept] = None
if ALLURIScontext and isinstance(ALLURIScontext, list):
ALLURIScontext = list(set(ALLURIScontext))
if cache_map_virtuoso is not None:
if not word in cache_map_virtuoso:
cache_map_virtuoso[word] = {}
cache_map_virtuoso[word]['ALLURIScontext'] = ALLURIScontext
else:
if cache_map_virtuoso is not None:
if not word in cache_map_virtuoso:
cache_map_virtuoso[word] = {}
cache_map_virtuoso[word][contextWordVirtuoso] = None
cache_map_virtuoso[word]['ALLURIScontext'] = []
else: #this is instead using Bioportal API for linking
if strtobool(args.debug):
print("Use Bioportal for linking ... " + word.lower())
response = requests.get(url)
try:
data = response.json()
dff = pd.DataFrame(data)
dff = dff.drop(columns=['hierarchy', 'mappings'])
# If the columns are dictionary-like, use pd.json_normalize:
expanded_annotated_class = pd.json_normalize(dff['annotatedClass'])
expanded_annotations = pd.DataFrame(dff['annotations'].tolist(), index=dff.index)
expanded_annotations = pd.json_normalize(expanded_annotations[0])
# Join the expanded columns back to the original DataFrame
df_expanded = dff.drop(columns=['annotatedClass', 'annotations']).join(expanded_annotated_class).join(
expanded_annotations)
# Replace the SNOMEDCT base URI because our internal knowledge base uses http://snomed.info/id/
df_expanded['@id'] = df_expanded['@id'].str.replace(
"http://purl.bioontology.org/ontology/SNOMEDCT/",
"http://snomed.info/id/"
)
if not df_expanded.empty:
df_expanded = df_expanded.sort_values(
by=['from', 'text', 'score', 'matchType'], ascending=[True, True, False, False])
df_expanded = df_expanded.drop_duplicates(subset=['@id'])
# Filter rows where 'prefLabel' is exactly equal to 'word.lower()' or 'word.lower()' is in 'synonym'
# filtered_df = df_expanded[
# df_expanded['prefLabel'].str.lower() == word.lower() |
# df_expanded['synonym'].apply(
# lambda x: True if isinstance(x, list) and len(x) > 0 and word.lower() in [item.lower() for
# item in x] else (
# np.nan if x is None or (isinstance(x, float) and pd.isna(x)) else x))
# ]
df_expanded = df_expanded[
df_expanded['prefLabel'].apply(
lambda x: isinstance(x, str) and x.lower() == word.lower()
) |
df_expanded['synonym'].apply(
lambda x: isinstance(x, list) and any(item.lower() == word.lower() for item in x)
)
]
# Specify the columns you want to keep
columns_to_keep = ["score", "from", "to", "prefLabel", "text", "@id"]
# Subset the dataframe to keep only the specified columns
df_expanded = df_expanded[columns_to_keep]
# Rename the specified columns
df_expanded = df_expanded.rename(
columns={"from": "start", "to": "end", "text": "word"})
# Optional: Reset index if you want a clean index after filtering
df_expanded = df_expanded.reset_index(drop=True)
df_expanded['score'] = df_expanded['score'].round(2)
# Find the index of the row with the maximum 'score'
max_score_index = df_expanded['score'].idxmax()
max_score_row = df_expanded.loc[df_expanded['score'].idxmax()]
entityBioeUrl = str(max_score_row['@id'])
if cache_map_virtuoso is not None:
if not word in cache_map_virtuoso:
cache_map_virtuoso[word] = {}
cache_map_virtuoso[word][contextWordVirtuoso] = entityBioeUrl
# Drop the row with the maximum 'score'
#df_expanded = df_expanded.drop(max_score_index)
# Reset the index if desired (optional)
df_expanded.reset_index(drop=True, inplace=True)
# Iterate over each row in the DataFrame
for index, row in df_expanded.iterrows():
# Append the '@id' value to the list
if row['@id'] is not None and pd.notna(row['@id']):
contextConcept=row['@id']
ALLURIScontext.append(contextConcept)
if cache_map_virtuoso is not None:
if not word in cache_map_virtuoso:
cache_map_virtuoso[word] = {}
cache_map_virtuoso[word][contextConcept] = None
if ALLURIScontext and isinstance(ALLURIScontext, list):
ALLURIScontext = list(set(ALLURIScontext))
if cache_map_virtuoso is not None:
if not word in cache_map_virtuoso:
cache_map_virtuoso[word] = {}
cache_map_virtuoso[word]['ALLURIScontext'] = ALLURIScontext
return entityBioeUrl, ALLURIScontext, cache_map_virtuoso
else:
#nothing found from Bioportal
return None, None, cache_map_virtuoso
except Exception as err:
logging.error(
f'ERROR ON BioPortal Annotator API Call\n\tError: {err}\n TextToAnnotate: {word.lower()}\n Have a check...')
return None, None, cache_map_virtuoso
except Exception as err:
# if cache_map_virtuoso is not None:
# if not word in cache_map_virtuoso:
# cache_map_virtuoso[word] = {}
# cache_map_virtuoso[word][contextWordVirtuoso] = None
return None, None, cache_map_virtuoso
return entityBioeUrl, ALLURIScontext, cache_map_virtuoso
def getLinearTextualContextFromTriples(word,labelTriplesLIST, text_splitter, args, map_query_input_output, cleanInput=True):
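"""Turn a list of (subject, predicate, object) label triples into a short textual context for 'word'.
The triples are joined into a single text (truncated to args.tokens_max tokens via
text_splitter) intended to be rewritten into readable sentences by an LLM; the actual
model call is currently commented out, so contextText stays empty unless a cached
answer is found in map_query_input_output. Returns (contextText, map_query_input_output).
"""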
# trial
#return None, map_query_input_output
word = word.lower()
word = word.capitalize()
labelTriples = ". ".join(" ".join(element.capitalize() for element in triple) for triple in labelTriplesLIST)
if token_counter(labelTriples, args.model_name) > args.tokens_max: # THE CONTEXT IS TOO BIG, BIGGER THAN tokens_max, I need to split
texts = text_splitter.create_documents([labelTriples])
labelTriples = texts[0].page_content
#Can you elaborate and express better the following notes, delimited by triple backticks, about "{word}"?
#Don't add explanations for your answer. Do not invent. Don't use a structure or indenting. Be concise. Don't discard relevant information.
#made of RDF-like statements,
contextText = ""
# myPromt = f"""
# Can you elaborate and express better the given notes below, delimited by triple backticks, about "{word}"?
# Don't add explanations for your answer.
# Do not invent.
# Don't use a structure or indenting.
# Be concise but exhaustive. Don't discard information reported in the notes.
# """
myPromt = f"""
Can you reformulate the following notes, provided between triple backticks, into clear and complete sentences about "{word}"?
Ensure the rewriting is human-readable and easily interpretable. Maintain conciseness and exhaustiveness, including all information from the notes.
Avoid using note formats or lists, and refrain from inventing additional information.
"""
myDelimiter = "```"
if not labelTriples or labelTriples.strip() == "":
logging.warning("No text or prompt supplied! Skipping it!")
return contextText, map_query_input_output
if cleanInput==True:
labelTriples = cleanInputText(labelTriples)
# try to read cache
if map_query_input_output is not None:
key = args.model_name + "__" + str(args.temperature) + "__" + myPromt
if key in map_query_input_output:
if labelTriples in map_query_input_output[key]:
output = map_query_input_output[key][labelTriples]
# if input_text.strip() == "":
# print("here")
# if handler == api_call_dglc:
# output = clean_gpt_out(output) #clean output
if strtobool(args.debug):
print("RETRIEVED CACHED RESULT FOR:\n", myPromt, "\n", myDelimiter, word, myDelimiter, "\n=>\n", output, "\n")
return output, map_query_input_output
# call
try:
contextText = ""
# if args.service_provider == "gptjrc":
# contextText = call_model(input_text=labelTriples, prompt=myPromt, model=args.model_name,
# temperature=args.temperature, delimiter=myDelimiter,
# InContextExamples=[],
# handler=api_call_gptjrc,
# verbose=True, args=args)
if contextText:
if not isinstance(contextText, str):
contextText = contextText['choices'][0]['message']['content']
if map_query_input_output is not None:
if not key in map_query_input_output:
map_query_input_output[key] = {}
if contextText:
if contextText != "":
map_query_input_output[key][labelTriples] = contextText
except Exception as err:
return None, map_query_input_output
return contextText, map_query_input_output
#@mem.cache
def virtuoso_api_call(word, text_splitter, args, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, id=None, iALLURIScontextFromNCBO=None):
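"""Entity-linking entry point for a single word.
The word is resolved to a concept IRI using, in order, the supplied id, the
cache_map_virtuoso cache, or getUrlBioAndAllOtherBioConcepts. When
args.computeEntityContext / args.computeEntityGlobalContext are enabled, label
triples are fetched from the Virtuoso SPARQL endpoint and turned into textual
contexts via getLinearTextualContextFromTriples, with intermediate results cached.
Returns the linking results together with the updated cache_map_virtuoso and
load_map_query_input_output.
"""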
if strtobool(args.debug):
print(f"\n----- Starting virtuoso_api_call for {word}")
word = word.lower()
endpoint = 'https://api-vast.jrc.service.ec.europa.eu/sparql'
VirtuosoUsername = 'dba'
if getattr(args, 'KG_restriction', None):
contextWordVirtuoso = ', '.join(sorted(args.KG_restriction))
else:
contextWordVirtuoso = ""
singleContext = None
globalContext = None
sssingleTriples = None
ggglobalTriples = None
unique_listLabelTriples = []
unique_listGlobalTriples = []
ALLURIScontext = []
url_text = None
if id:
url_text = id
if iALLURIScontextFromNCBO and isinstance(iALLURIScontextFromNCBO, list):
ALLURIScontext=iALLURIScontextFromNCBO
ALLURIScontext = list(set(ALLURIScontext))
if (cache_map_virtuoso is not None) and (not url_text):
if word in cache_map_virtuoso:
if contextWordVirtuoso in cache_map_virtuoso[word]:
url_text = cache_map_virtuoso[word][contextWordVirtuoso]
if strtobool(args.debug):
print("RETRIEVED CACHED RESULT FOR:\n", word, " => ", url_text, "\n")
if not url_text:
return None, None, None, None, None, None, cache_map_virtuoso, load_map_query_input_output
if url_text and not ALLURIScontext:
if cache_map_virtuoso is not None:
if word in cache_map_virtuoso:
if 'ALLURIScontext' in cache_map_virtuoso[word]:
ALLURIScontext = cache_map_virtuoso[word]['ALLURIScontext']
entityBioeUrl = None
if url_text and ALLURIScontext:
entityBioeUrl = url_text
else:
try:
entityBioeUrl, ALLURIScontext, cache_map_virtuoso = getUrlBioAndAllOtherBioConcepts(word, args, key_virtuoso, cache_map_virtuoso, endpoint, VirtuosoUsername, contextWordVirtuoso, UseBioportalForLinking=True )
if ALLURIScontext and isinstance(ALLURIScontext, list):
ALLURIScontext = list(set(ALLURIScontext))
except Exception as err:
# if cache_map_virtuoso is not None:
# if not word in cache_map_virtuoso:
# cache_map_virtuoso[word] = {}
# cache_map_virtuoso[word][contextWordVirtuoso] = None
return None, None, None, None, None, None, cache_map_virtuoso, load_map_query_input_output
if entityBioeUrl:
if strtobool(args.computeEntityContext):
if strtobool(args.debug):
print("START computeEntityContext")
unique_listLabelTriples = []
singleContext = None
if cache_map_virtuoso is not None:
if entityBioeUrl in cache_map_virtuoso:
if "LabelTriples" in cache_map_virtuoso[entityBioeUrl]:
unique_listLabelTriples = cache_map_virtuoso[entityBioeUrl]["LabelTriples"]
if strtobool(args.debug):
print("RETRIEVED CACHED RESULT FOR:\n", entityBioeUrl, " => ", "LabelTriples", "\n")
if "SingleContext" in cache_map_virtuoso[entityBioeUrl]:
singleContext = cache_map_virtuoso[entityBioeUrl]["SingleContext"]
if strtobool(args.debug):
print("RETRIEVED CACHED RESULT FOR:\n", entityBioeUrl, " => ", "SingleContext", "\n")
if not singleContext:
if unique_listLabelTriples:
singleContext, load_map_query_input_output = getLinearTextualContextFromTriples(word, unique_listLabelTriples,
text_splitter, args,
load_map_query_input_output)
else:
query = f"""
SELECT DISTINCT ?labelS ?labelP ?labelO
WHERE {{
{{
<{entityBioeUrl}> ?p ?o.
<{entityBioeUrl}> skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelS .
?p skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelP .
?o skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelO .
}}
UNION
{{
SELECT ?labelS ?labelP ?labelO
WHERE {{
<{entityBioeUrl}> ?p ?labelO .
<{entityBioeUrl}> skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelS .
?p skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelP .
FILTER (isLiteral(?labelO))
}}
}}
UNION
{{
SELECT DISTINCT ?labelS ?labelP ?labelO
WHERE {{
<{entityBioeUrl}> ?ppp ?ooo .
?ooo rdf:type owl:Restriction .
?ooo owl:onProperty ?p .
?ooo owl:someValuesFrom ?o .
<{entityBioeUrl}> skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelS .
?p skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelP .
?o skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelO .
}}
}}
}}
"""
try:
responseText = sparqlQuery(endpoint, query, VirtuosoUsername, key_virtuoso, strtobool(args.USE_CACHE))
# Parse the response as JSON
results = json.loads(responseText)
if len(results) > 0 and results['results']['bindings']:
# word = "subClassOf"
# split_word = split_camel_case(word)
# # loop the results
labelTriples=""
listLabelTriples = []
pattern = r'\^\^<http:.*?>'
for result in results['results']['bindings']:
#print(result)
ss = str(result['labelS']['value']).strip().replace("..",".").replace("@en","")
ss = re.sub(pattern, '', ss)
pp = split_camel_case(str(result['labelP']['value'])).replace("_"," ").strip().replace("..",".").replace("@en","")
pp = re.sub(pattern, '', pp)
oo = str(result['labelO']['value']).replace("_"," ").strip().replace("..",".").replace("@en","")
oo = re.sub(pattern, '', oo)
listLabelTriples.append([ss, pp, oo])
# Remove duplicates while preserving order
unique_listLabelTriples = list(dict.fromkeys(tuple(triple) for triple in listLabelTriples))
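# (triples are converted to tuples because lists are not hashable; dict.fromkeys keeps the first occurrence and preserves insertion order)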
# # If you need the result back in list of lists format
# unique_listLabelTriples = [list(triple) for triple in unique_listLabelTriples]
if unique_listLabelTriples:
if cache_map_virtuoso is not None:
if not entityBioeUrl in cache_map_virtuoso:
cache_map_virtuoso[entityBioeUrl] = {}
cache_map_virtuoso[entityBioeUrl]["LabelTriples"] = unique_listLabelTriples
singleContext, load_map_query_input_output = getLinearTextualContextFromTriples(word, unique_listLabelTriples, text_splitter, args, load_map_query_input_output)
except Exception as err:
singleContext = None
if singleContext:
if cache_map_virtuoso is not None:
if not entityBioeUrl in cache_map_virtuoso:
cache_map_virtuoso[entityBioeUrl] = {}
cache_map_virtuoso[entityBioeUrl]["SingleContext"] = singleContext
if strtobool(args.computeEntityGlobalContext):
if strtobool(args.debug):
print("START computeEntityGlobalContext")
unique_listGlobalTriples = []
globalContext = None
if cache_map_virtuoso is not None:
if word in cache_map_virtuoso:
if ("GlobalTriples"+" "+contextWordVirtuoso).strip() in cache_map_virtuoso[word]:
unique_listGlobalTriples = cache_map_virtuoso[word][("GlobalTriples"+" "+contextWordVirtuoso).strip()]
if strtobool(args.debug):
print("RETRIEVED CACHED RESULT FOR:\n", word, " => ", ("GlobalTriples"+" "+contextWordVirtuoso).strip(), "\n")
if ("GlobalContext"+" "+contextWordVirtuoso).strip() in cache_map_virtuoso[word]:
globalContext = cache_map_virtuoso[word][("GlobalContext"+" "+contextWordVirtuoso).strip()]
if strtobool(args.debug):
print("RETRIEVED CACHED RESULT FOR:\n", word, " => ", ("GlobalContext"+" "+contextWordVirtuoso).strip(), "\n")
if not globalContext:
if unique_listGlobalTriples:
globalContext, load_map_query_input_output = getLinearTextualContextFromTriples(word, unique_listGlobalTriples,
text_splitter, args,
load_map_query_input_output)
else:
if not ALLURIScontext:
if cache_map_virtuoso is not None:
if word in cache_map_virtuoso:
ALLURIScontext = list(cache_map_virtuoso[word].keys())
ALLURIScontext = [element for element in ALLURIScontext if element and ("GlobalTriples" not in element) and ("GlobalContext" not in element) and ("http" in element)]  # keep only the cached keys that are URIs, skipping the GlobalTriples/GlobalContext bookkeeping entries
if not ALLURIScontext:
# THIS CASE SHOULD BE VERY UNLIKELY... it can happen when the entity comes from an initial BioPortal annotation, which already recognised the first URL
# # Print the error message to stderr
# print("THIS CASE SHOULD NEVER HAPPEN!!!! Check what's happening...exiting now...")
# # Exit the program with a non-zero status code (commonly used to indicate an error)
# sys.exit(1)
try:
entityBioeUrl, ALLURIScontext, cache_map_virtuoso = getUrlBioAndAllOtherBioConcepts(word,
args,
key_virtuoso,
cache_map_virtuoso,
endpoint,
VirtuosoUsername,
contextWordVirtuoso,
UseBioportalForLinking=True)
if ALLURIScontext and isinstance(ALLURIScontext, list):
ALLURIScontext = list(set(ALLURIScontext))
except Exception as err:
# if cache_map_virtuoso is not None:
# if not word in cache_map_virtuoso:
# cache_map_virtuoso[word] = {}
# cache_map_virtuoso[word][contextWordVirtuoso] = None
return None, None, None, None, None, None, cache_map_virtuoso, load_map_query_input_output
if not ALLURIScontext:
# Log the unexpected condition; execution continues here, the program does not actually exit
print("THIS CASE SHOULD NEVER HAPPEN NOW!!!! Check what's happening...")
else:
for xxUrl in ALLURIScontext:
unique_listLabelTriples = []
# singleContext = None
if cache_map_virtuoso is not None:
if xxUrl in cache_map_virtuoso:
if "LabelTriples" in cache_map_virtuoso[xxUrl]:
unique_listLabelTriples = cache_map_virtuoso[xxUrl]["LabelTriples"]
if strtobool(args.debug):
print("RETRIEVED CACHED RESULT FOR:\n", xxUrl, " => ",
"LabelTriples", "\n")
# if "SingleContext" in cache_map_virtuoso[xxUrl]:
# singleContext = cache_map_virtuoso[xxUrl]["SingleContext"]
# if strtobool(args.debug):
# print("RETRIEVED CACHED RESULT FOR:\n", xxUrl, " => ",
# "SingleContext", "\n")
# if not singleContext:
# if unique_listLabelTriples:
# singleContext, load_map_query_input_output = getLinearTextualContextFromTriples(
# word, unique_listLabelTriples,
# text_splitter, args,
# load_map_query_input_output)
# else:
if not unique_listLabelTriples:
query = f"""
SELECT DISTINCT ?labelS ?labelP ?labelO
WHERE {{
{{
<{xxUrl}> ?p ?o.
<{xxUrl}> skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelS .
?p skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelP .
?o skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelO .
}}
UNION
{{
SELECT ?labelS ?labelP ?labelO
WHERE {{
<{xxUrl}> ?p ?labelO .
<{xxUrl}> skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelS .
?p skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelP .
FILTER (isLiteral(?labelO))
}}
}}
UNION
{{
SELECT DISTINCT ?labelS ?labelP ?labelO
WHERE {{
<{xxUrl}> ?ppp ?ooo .
?ooo rdf:type owl:Restriction .
?ooo owl:onProperty ?p .
?ooo owl:someValuesFrom ?o .
<{xxUrl}> skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelS .
?p skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelP .
?o skos:prefLabel|rdfs:label|skos:altLabel|obo:hasRelatedSynonym ?labelO .
}}
}}
}}
"""
try:
responseText = sparqlQuery(endpoint, query, VirtuosoUsername, key_virtuoso, strtobool(args.USE_CACHE))
# Parse the response as JSON
results = json.loads(responseText)
if len(results) > 0 and results['results']['bindings']:
# word = "subClassOf"
# split_word = split_camel_case(word)
# # loop the results
labelTriples = ""
listLabelTriples = []
pattern = r'\^\^<http:.*?>'
for result in results['results']['bindings']:
# print(result)
ss = str(result['labelS']['value']).strip().replace("..", ".").replace("@en","")
ss = re.sub(pattern, '', ss)
pp = split_camel_case(str(result['labelP']['value'])).replace("_"," ").strip().replace("..", ".").replace("@en","")
pp = re.sub(pattern, '', pp)
oo = str(result['labelO']['value']).replace("_"," ").strip().replace("..", ".").replace("@en","")
oo = re.sub(pattern, '', oo)
listLabelTriples.append([ss, pp, oo])
# Remove duplicates while preserving order
unique_listLabelTriples = list(
dict.fromkeys(tuple(triple) for triple in listLabelTriples))
# # If you need the result back in list of lists format
# unique_listLabelTriples = [list(triple) for triple in unique_listLabelTriples]
if unique_listLabelTriples:
if cache_map_virtuoso is not None:
if not xxUrl in cache_map_virtuoso:
cache_map_virtuoso[xxUrl] = {}
cache_map_virtuoso[xxUrl][
"LabelTriples"] = unique_listLabelTriples
# singleContext, load_map_query_input_output = getLinearTextualContextFromTriples(
# word, unique_listLabelTriples, text_splitter, args, load_map_query_input_output)
#
# if singleContext:
# if cache_map_virtuoso is not None:
# if not xxUrl in cache_map_virtuoso:
# cache_map_virtuoso[xxUrl] = {}
# cache_map_virtuoso[xxUrl][
# "SingleContext"] = singleContext
except Exception as err:
unique_listLabelTriples = []
if unique_listLabelTriples:
unique_listGlobalTriples.extend(unique_listLabelTriples)
# If I want to speed up, I could break here, but then the triples for the other URIs would not be stored in the cache, which may be useful in the future
# if token_counter(str(unique_listGlobalTriples),args.model_name) > args.tokens_max:
# break # BREAK THE FOR LOOP IF THE GLOBAL CONTEXT IS ALREADY TOO BIG, BIGGER THAN tokens_max
if unique_listGlobalTriples:
# Remove duplicates while preserving order
unique_listGlobalTriples = list(
dict.fromkeys(tuple(triple) for triple in unique_listGlobalTriples))
if cache_map_virtuoso is not None:
if not word in cache_map_virtuoso:
cache_map_virtuoso[word] = {}
cache_map_virtuoso[word][("GlobalTriples"+" "+contextWordVirtuoso).strip()] = unique_listGlobalTriples
globalContext, load_map_query_input_output = getLinearTextualContextFromTriples(word,
unique_listGlobalTriples,
text_splitter, args,
load_map_query_input_output)
if globalContext:
if cache_map_virtuoso is not None:
if not word in cache_map_virtuoso:
cache_map_virtuoso[word] = {}
cache_map_virtuoso[word][("GlobalContext"+" "+contextWordVirtuoso).strip()] = globalContext
if unique_listLabelTriples:
sssingleTriples = " ,., ".join(
" ,,, ".join(element.capitalize() for element in triple) for triple in unique_listLabelTriples)
while "\\n" in sssingleTriples:
sssingleTriples = sssingleTriples.replace("\\n", " ")
sssingleTriples = sssingleTriples.strip()
while "\t" in sssingleTriples:
sssingleTriples = sssingleTriples.replace("\t", " ")
sssingleTriples = sssingleTriples.strip()
if unique_listGlobalTriples:
ggglobalTriples = " ,., ".join(
" ,,, ".join(element.capitalize() for element in triple) for triple in unique_listGlobalTriples)
while "\\n" in ggglobalTriples:
ggglobalTriples = ggglobalTriples.replace("\\n", " ")
ggglobalTriples = ggglobalTriples.strip()
while "\t" in ggglobalTriples:
ggglobalTriples = ggglobalTriples.replace("\t", " ")
ggglobalTriples = ggglobalTriples.strip()
return entityBioeUrl, ALLURIScontext, singleContext, globalContext, sssingleTriples, ggglobalTriples, cache_map_virtuoso, load_map_query_input_output
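# process_row4Linking links a single annotated row: in the default mode, geographic entities (IsGeo == 1) are resolved
# through GeoNames and biomedical ones (IsBio == 1) through the Virtuoso/BioPortal pipeline; in BioKg RAG mode, biomedical
# and unflagged MISC entities are sent to Virtuoso as well. It returns the linked URI, the candidate URIs, the single/global
# contexts and triples, the updated caches, and the row index so that results can be written back after parallel execution.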
def process_row4Linking(row, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output):
result = None
singleContext = None
globalContext = None
singleTriples = None
globalTriples = None
ALLURIScontext = []
InRagMode=False
if hasattr(args, 'useBioKgRAG') and (strtobool(args.useBioKgRAG)==True):
InRagMode = True
if (InRagMode==False):
if row['IsGeo'] == 1:
if strtobool(args.debug):
print(f"\n----- IsGeo ... COMPUTING {row['word']} IN THE TEXT:")
print(row[args.source_column])
result, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames = geonames_api_call(row['word'], args, key_geonames, cache_map_geonames)
else:
if row['IsBio'] == 1:
# Check if '@id' column exists in df_Extract
iiid = None
# Check if the '@id' exists in the Series
if '@id' in row:
# Check if the value is not None or NaN
if row['@id'] is not None and not pd.isna(row['@id']):
# Assign the value to the variable iiid
iiid = row['@id']
iiiALLURIScontextFromNCBO = None
if 'ALLURIScontextFromNCBO' in row:
if row['ALLURIScontextFromNCBO'] is not None and isinstance(row['ALLURIScontextFromNCBO'], list): #and not pd.isna(row['ALLURIScontextFromNCBO']):
iiiALLURIScontextFromNCBO=row['ALLURIScontextFromNCBO']
iiiALLURIScontextFromNCBO = list(set(iiiALLURIScontextFromNCBO))
if strtobool(args.debug):
print(f"\n----- isBio COMPUTING ... {row['word']} IN THE TEXT:")
print(row[args.source_column])
result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_virtuoso, load_map_query_input_output = virtuoso_api_call(row['word'], text_splitter, args, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, id=iiid, iALLURIScontextFromNCBO=iiiALLURIScontextFromNCBO)
else:
if (row['IsBio'] == 1) or ( (pd.isnull(row["IsBio"]) or row["IsBio"] == '' or row['IsBio'] == 0 or row["IsBio"] is None) and (row['entity_group'] == "MISC") ):
if strtobool(args.debug):
print(f"\n----- InRagMode ...COMPUTING ... {row['word']} IN THE TEXT:")
print(row[args.source_column])
# Check if '@id' column exists in df_Extract
iiid = None
# Check if the '@id' exists in the Series
if '@id' in row:
# Check if the value is not None or NaN
if row['@id'] is not None and not pd.isna(row['@id']):
# Assign the value to the variable iiid
iiid = row['@id']
iiiALLURIScontextFromNCBO = None
if 'ALLURIScontextFromNCBO' in row:
if row['ALLURIScontextFromNCBO'] is not None and isinstance(row['ALLURIScontextFromNCBO'], list):
iiiALLURIScontextFromNCBO = row['ALLURIScontextFromNCBO']
iiiALLURIScontextFromNCBO = list(set(iiiALLURIScontextFromNCBO))
result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_virtuoso, load_map_query_input_output = virtuoso_api_call(
row['word'], text_splitter, args, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, id=iiid, iALLURIScontextFromNCBO=iiiALLURIScontextFromNCBO)
return result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames, cache_map_virtuoso, load_map_query_input_output, row.name
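# parallel_process_Row4Linking runs process_row4Linking for every DataFrame row in a ThreadPoolExecutor
# (args.num_cores_eLinking workers) and writes each result back into the DataFrame at the returned row index
# as the futures complete; the shared caches are merged with update_nested_dict if they ever diverge.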
def parallel_process_Row4Linking(df, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output):
results = []
with ThreadPoolExecutor(max_workers=args.num_cores_eLinking) as executor:
# Submit tasks to ThreadPoolExecutor
futures = [executor.submit(process_row4Linking, row, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output)
for _, row in df.iterrows()]
# Collect results
for future in as_completed(futures):
try:
result, ALLURIScontext, singleContext, globalContext, singleTriples, globalTriples, cache_map_geonames_Inner, cache_map_virtuoso_Inner, load_map_query_input_output_Inner, drm_idx = future.result()
df.at[drm_idx,'namedEntity'] = result
df.at[drm_idx, 'ALLURIScontext'] = ALLURIScontext
df.at[drm_idx,'Context'] = singleContext
df.at[drm_idx,'ContextGlobal'] = globalContext
df.at[drm_idx, 'Triples'] = singleTriples
df.at[drm_idx, 'TriplesGlobal'] = globalTriples
# Recursively update the cache maps with the values from the _Inner copies. In practice this should never trigger:
# dictionaries are passed by reference, so changes made inside process_row4Linking are already visible here.
# cache_map_geonames, cache_map_virtuoso and load_map_query_input_output therefore behave like shared state, which is very convenient for the parallelization.
if cache_map_geonames != cache_map_geonames_Inner:
update_nested_dict(cache_map_geonames, cache_map_geonames_Inner)
if cache_map_virtuoso != cache_map_virtuoso_Inner:
update_nested_dict(cache_map_virtuoso, cache_map_virtuoso_Inner)
if load_map_query_input_output != load_map_query_input_output_Inner:
update_nested_dict(load_map_query_input_output, load_map_query_input_output_Inner)
except Exception as e:
print(f"Error occurred: {e}")
return df, cache_map_geonames, cache_map_virtuoso, load_map_query_input_output
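# elinking adds the entity-linking columns (namedEntity, ALLURIScontext, Context, ContextGlobal, Triples, TriplesGlobal)
# to the annotated DataFrame, either in parallel (num_cores_eLinking > 1, followed by a re-sort) or sequentially via apply,
# and returns the DataFrame together with the updated GeoNames, Virtuoso and LLM-query caches.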
def elinking(df_annotated_combined, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output, device):
if "ALLURIScontext" not in df_annotated_combined.columns:
df_annotated_combined["ALLURIScontext"] = None
if args.num_cores_eLinking>1:
# parallel elinking process
#result
df_annotated_combined, cache_map_geonames_AFTER, cache_map_virtuoso_AFTER, load_map_query_input_output_AFTER = parallel_process_Row4Linking(df_annotated_combined, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output)
#if isinstance(result, list):
# result=pd.Series(result) # I need this after the parallel processing
else:
# single processing
result = df_annotated_combined.apply(lambda row: process_row4Linking(row, text_splitter, args, key_geonames, cache_map_geonames, key_virtuoso, cache_map_virtuoso, load_map_query_input_output), axis=1)
#
df_annotated_combined['namedEntity'] = result.str[0]
df_annotated_combined['ALLURIScontext'] = result.str[1]
df_annotated_combined['Context'] = result.str[2]
df_annotated_combined['ContextGlobal'] = result.str[3]
df_annotated_combined['Triples'] = result.str[4]
df_annotated_combined['TriplesGlobal'] = result.str[5]
cache_map_geonames_AFTER = result.str[6].iloc[-1]
cache_map_virtuoso_AFTER = result.str[7].iloc[-1]
load_map_query_input_output_AFTER = result.str[8].iloc[-1] #
if args.num_cores_eLinking>1:
# with parallel linking the futures complete out of order, so the rows must be re-sorted
df_annotated_combined = df_annotated_combined.sort_values(by=['SentenceRef', 'start', 'ToLink', 'word', 'score'],
ascending=[True, True, True, True, False])
return df_annotated_combined, cache_map_geonames_AFTER, cache_map_virtuoso_AFTER, load_map_query_input_output_AFTER
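# nerBio is the Gradio callback: it (re-)annotates the input text with all models when the text changes (otherwise it
# reuses the history state), fuses the annotations, filters them by the selected models, categories and score threshold,
# optionally performs entity linking, and returns the entity dictionary for the HighlightedText widget, an HTML rendering
# of the text (with links when NEL is enabled), and the updated history state.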
def nerBio(text, ModelsSelection, CategoriesSelection, ScoreFilt, EntityLinking, history_dict: dict):
if EntityLinking:
EnableNEL="True"
else:
EnableNEL="False"
if not text:
html_output = f"<div class='gr-textbox' style='white-space: pre-wrap; overflow-wrap: break-word; padding: 10px; border: 1px solid #ddd; border-radius: 5px; font-family: monospace; font-size: 12px; line-height: 24px;'>{text}</div>"
return {"text": text, "entities": []}, html_output, dict()
df_annotated = pd.DataFrame()
parser = argparse.ArgumentParser()
parser.add_argument("--model_id", type=str, default=models_List[0], help="model to use")
parser.add_argument("--debug", type=str, default="True", help="set debug mode")
parser.add_argument("--source_column", type=str, default="ContextToAnnotate")
parser.add_argument("--entities_filter_threshold", type=int, default=ScoreFilt)
parser.add_argument("--SEED", type=int, default=41)
parser.add_argument("--batch_size", type=int, default=1) # 4 - 8 - 16
parser.add_argument("--num_cores_Gliner", type=int, default=num_cores_Gliner_forDemo, help="parallel processing for Gliner annotation") # 0 means use the GPU for Gliner !
parser.add_argument("--entity_linking", type=str, default=EnableNEL, help="whether to make entities linking or not")
parser.add_argument("--geonameskey_filename", type=str, default="", help="file location where it is stored the geonames api key")
parser.add_argument("--virtuosokey_filename", type=str, default="", help="file location where it is stored the virtuoso endpoint dba pwd")
parser.add_argument("--bioportalkey_filename", type=str, default="", help="file location where it is stored the NCBO BioPortal api key")
parser.add_argument("--USE_CACHE", type=str, default="False", help="whether to use cache for the NER and NEL tasks or not")
parser.add_argument("--num_cores_eLinking", type=int, default=1, help="parallel processing for the entity linking process")
parser.add_argument("--computeEntityContext", type=str, default="False",
help="whether to extract a readable context from the extracted triples for the concept")
parser.add_argument("--computeEntityGlobalContext", type=str, default="False",
help="whether to extract a readable context from the extracted triples of all the entities extracted from the endpoint for the concept")
parser.add_argument("--service_provider", type=str, default="no", help="llm service provider")
parser.add_argument("--model_name", type=str, default="no", help="llm to use")
parser.add_argument("--tokens_max", type=int, default=80000, help="max number of tokens to supply to the llm")
parser.add_argument("--temperature", type=int, default=0.01)
args = parser.parse_args()
#print("Are all models in any row of the 'model' column, case-insensitively?", all_models_in_any_row)
#if (not history_dict) or (history_dict[args.source_column][0] != text) or (all_models_in_any_row == False):
if (not history_dict) or (history_dict[args.source_column][0] != text):
for model_id in models_List: # always do all the annotations, only filter them afterwards
#for model_id in ModelsSelection:
# if history_dict and (history_dict[args.source_column][0] == text):
# if model_id in hhist['model'].unique():
# continue
parser.set_defaults(model_id=model_id)
args = parser.parse_args()
print("ARGS:")
print(args)
# %% In machine learning tasks, particularly when dealing with models that involve stochasticity (like text generation), it's important to set seeds for the random number generators to ensure reproducibility. When using models from the transformers library, set the seeds for Python's random module, NumPy, and PyTorch so that the results are the same every time the code runs.
# Before you create the pipeline and run the text generation, set the seeds like this:
random.seed(args.SEED)
np.random.seed(args.SEED)
torch.manual_seed(args.SEED)
torch.cuda.manual_seed_all(args.SEED)
###
df_ToAnnotate = pd.DataFrame({ "ToLink": [None], args.source_column: [text]})
if "SentenceRef" not in df_ToAnnotate.columns:
df_ToAnnotate["SentenceRef"] = None
df_ToAnnotate = df_ToAnnotate[['SentenceRef'] + [col for col in df_ToAnnotate.columns if
col != 'SentenceRef']] # this moves it to the first position
df_ToAnnotate['SentenceRef'] = df_ToAnnotate.index + 1
df_ToAnnotate['SentenceRef'] = df_ToAnnotate['SentenceRef'].argsort().groupby(df_ToAnnotate[args.source_column]).transform('min').astype(int)
df_ToAnnotate['SentenceRef'] = df_ToAnnotate['SentenceRef'].rank(method='dense').astype(int)
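# SentenceRef gives every row that shares the same input text the same dense, 1-based sentence index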
pipeToUse = None
if (("gliner" in args.model_id) == False) and (("NCBO" in args.model_id)== False) :
pipeToUse = pipe_dict[args.model_id]
new_annotations = annotate(df_ToAnnotate, args, pipeToUse, tokenizerGliner, modelGliner, modelGlinerBio, device)
if not new_annotations.empty:
if df_annotated.empty:
# If df_annotated is empty, just assign new_annotations to it
df_annotated = new_annotations
else:
# If df_annotated is not empty, concatenate new_annotations to it
df_annotated = pd.concat([df_annotated, new_annotations], ignore_index=True)
history = df_annotated.copy()
else:
print("ARGS:")
print(args)
# %% In machine learning tasks, particularly when dealing with models that involve stochasticity (like text generation), it's important to set seeds for the random number generators to ensure reproducibility. When using models from the transformers library, set the seeds for Python's random module, NumPy, and PyTorch so that the results are the same every time the code runs.
# Before you create the pipeline and run the text generation, set the seeds like this:
random.seed(args.SEED)
np.random.seed(args.SEED)
torch.manual_seed(args.SEED)
torch.cuda.manual_seed_all(args.SEED)
###
history = pd.DataFrame(history_dict)
df_annotated = history.copy()
if not df_annotated.empty:
# filter now per models selection
df_annotated = df_annotated[df_annotated['model'].str.lower().isin([model.lower() for model in ModelsSelection])]
if df_annotated.empty:
html_output = f"<div class='gr-textbox' style='white-space: pre-wrap; overflow-wrap: break-word; padding: 10px; border: 1px solid #ddd; border-radius: 5px; font-family: monospace; font-size: 12px; line-height: 24px;'>{text}</div>"
return {"text": text, "entities": []}, html_output, history.to_dict()
df_annotated_combined = entitiesFusion(df_annotated,args)
if df_annotated_combined.empty:
html_output = f"<div class='gr-textbox' style='white-space: pre-wrap; overflow-wrap: break-word; padding: 10px; border: 1px solid #ddd; border-radius: 5px; font-family: monospace; font-size: 12px; line-height: 24px;'>{text}</div>"
return {"text": text, "entities": []}, html_output, history.to_dict()
else:
df_annotated_combined = is_cross_inside(df_annotated_combined, args, 0.999) # cut all the cross-inside spans using the 0.999 threshold, to avoid linking them
cache_prefix_fp = "LLMQUERYNER"
cache_nameLLMs = cache_prefix_fp + "___" + "__".join(
[args.service_provider, args.model_name, str(args.temperature)]).replace(
" ", "_") + ".json"
load_map_query_input_output = None
if strtobool(args.USE_CACHE):
if os.path.exists(cache_nameLLMs):
with open(cache_nameLLMs) as f:
load_map_query_input_output = json.load(f)
else:
load_map_query_input_output = {}
### entity linking part:
if strtobool(args.entity_linking):
cache_map_geonames = None
if strtobool(args.USE_CACHE):
cache_filename = "CACHE_geonames.json"
if os.path.exists(cache_filename):
with open(cache_filename) as f:
cache_map_geonames = json.load(f)
else:
cache_map_geonames = {}
#key_geonames = ""
#if args.geonameskey_filename:
# fkeyname = args.geonameskey_filename
# with open(fkeyname) as f:
# key_geonames = f.read()
key_geonames = os.environ['key_geonames']
cache_map_virtuoso = None
if strtobool(args.USE_CACHE):
cacheVirtuoso_filename = "CACHE_virtuoso.json"
if os.path.exists(cacheVirtuoso_filename):
with open(cacheVirtuoso_filename) as f:
cache_map_virtuoso = json.load(f)
else:
cache_map_virtuoso = {}
#key_virtuoso = ""
#if args.virtuosokey_filename:
# fkeyname = args.virtuosokey_filename
# with open(fkeyname) as f:
# key_virtuoso = f.read()
key_virtuoso = os.environ['key_virtuoso']
df_annotated_combined, cache_map_geonames_AFTER, cache_map_virtuoso_AFTER, load_map_query_input_output_AFTER = elinking(df_annotated_combined,
text_splitter, args, key_geonames,
cache_map_geonames,
key_virtuoso,
cache_map_virtuoso,
load_map_query_input_output,
device)
if strtobool(args.USE_CACHE):
if cache_map_geonames_AFTER is not None:
with open(cache_filename, "w") as f:
json.dump(cache_map_geonames_AFTER, f)
if cache_map_virtuoso_AFTER is not None:
with open(cacheVirtuoso_filename, "w") as f:
json.dump(cache_map_virtuoso_AFTER, f)
if load_map_query_input_output_AFTER is not None:
with open(cache_nameLLMs, "w") as f:
json.dump(load_map_query_input_output_AFTER, f)
### end entity linking part
### filter by selected category only
# #df_annotated_combined = df_annotated_combined[df_annotated_combined['entity_group'].str.lower().isin([cat.lower() for cat in CategoriesSelection])]
# if "MED" in CategoriesSelection:
# filter_mask = df_annotated_combined['entity_group'].str.lower().isin(
# [cat.lower() for cat in CategoriesSelection]) | (df_annotated_combined['IsBio'] == 1)
# else:
# filter_mask = df_annotated_combined['entity_group'].str.lower().isin(
# [cat.lower() for cat in CategoriesSelection])
# df_annotated_combined = df_annotated_combined[filter_mask]
#
# if "MED" in CategoriesSelection:
# filter_mask = df_annotated_combined['entity_group'].str.lower().isin(
# [cat.lower() for cat in CategoriesSelection]) | (df_annotated_combined['IsBio'] == 1)
# elif "OTHER" in CategoriesSelection:
# filter_mask = ~(
# df_annotated_combined['entity_group'].str.lower().isin([cat.lower() for cat in categories_List]))
# else:
# filter_mask = df_annotated_combined['entity_group'].str.lower().isin(
# [cat.lower() for cat in CategoriesSelection])
filter_mask = df_annotated_combined['entity_group'].str.lower().isin(
[cat.lower() for cat in CategoriesSelection])
if "MED" in CategoriesSelection:
filter_mask |= df_annotated_combined['entity_group'].str.lower().isin(
[cat.lower() for cat in CategoriesSelection]) | (df_annotated_combined['IsBio'] == 1)
if "MISC" in CategoriesSelection:
#filter_mask |= ~(df_annotated_combined['entity_group'].str.lower().isin([cat.lower() for cat in categories_List]))
filter_mask |= ~(df_annotated_combined['entity_group'].str.lower().isin([cat.lower() for cat in categories_List])) & ~(df_annotated_combined['IsBio'] == 1) # with this clause, I'm including not only the categories labelled as MISC but also the others that are not MED, PER, ORG, or LOC
df_annotated_combined = df_annotated_combined[filter_mask]
if df_annotated_combined.empty:
html_output = f"<div class='gr-textbox' style='white-space: pre-wrap; overflow-wrap: break-word; padding: 10px; border: 1px solid #ddd; border-radius: 5px; font-family: monospace; font-size: 12px; line-height: 24px;'>{text}</div>"
return {"text": text, "entities": []}, html_output, history.to_dict()
###
#df_annotated_combined = is_cross_inside(df_annotated_combined, args)
if 'IsCrossInside' in df_annotated_combined.columns:
df_annotated_combined = df_annotated_combined[df_annotated_combined['IsCrossInside'] != 1]
if df_annotated_combined.empty:
html_output = f"<div class='gr-textbox' style='white-space: pre-wrap; overflow-wrap: break-word; padding: 10px; border: 1px solid #ddd; border-radius: 5px; font-family: monospace; font-size: 12px; line-height: 24px;'>{text}</div>"
return {"text": text, "entities": []}, html_output, history.to_dict()
dict_annotated_combined_NER = df_annotated_combined[["end", "entity_group", "score", "start", "word"]].to_dict(orient="records")
### continue linking part:
if strtobool(args.entity_linking):
# ##### this is to pass the links:
# Create a new column for the entities with links
# #df_annotated_combined['entity_with_link'] = df_annotated_combined.apply(lambda row: f"<a href='{row['namedEntity']}'>{row['word']}</a>", axis=1)
# df_annotated_combined['entity_with_link'] = df_annotated_combined.apply(
# lambda row: f"<a href='{row['namedEntity']}'>{row['word']}</a>" if pd.notnull(row['namedEntity']) else row[
# 'word'], axis=1)
#include the expl-rel prefix:
df_annotated_combined['entity_with_link'] = df_annotated_combined.apply(
lambda row: f"<a href='https://expl-rels-dev-vast.apps.ocpt.jrc.ec.europa.eu/?concept={row['namedEntity']}'>{row['word']}</a>" if pd.notnull(row['namedEntity']) else row[
'word'], axis=1)
# Create a new dictionary with the entity information and the link
dict_annotated_combined_NEL = df_annotated_combined[
["end", "entity_group", "score", "start", "entity_with_link"]].to_dict(orient="records")
# Sort the entities by their start index
dict_annotated_combined_NEL.sort(key=lambda x: x['start'])
# Create a dictionary to map entity groups to colors
entity_colors = {
"MED": "#E6E6E6",
"PER": "#FFC0CB",
"ORG": "#C6F4D6",
"LOC": "#FFFFCC",
"MISC": "#F5DEB3"
}
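# (entity_colors is only used by the commented-out colored rendering below.) Splice the anchor tags into the text:
# entities were sorted by start offset above, and 'offset' accumulates the extra characters inserted so far so that
# the original start/end positions stay aligned with the growing string.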
text_with_links = text
offset = 0
for entity in dict_annotated_combined_NEL:
start = entity["start"] + offset
end = entity["end"] + offset
entity_text = entity["entity_with_link"]
text_with_links = text_with_links[:start] + entity_text + text_with_links[end:]
offset += len(entity_text) - (end - start)
# # Create the text with entities highlighted and linked
# text_with_links = text
# offset = 0
# for entity in dict_annotated_combined_NEL:
# start = entity["start"] + offset
# end = entity["end"] + offset
# entity_text = entity["entity_with_link"]
# entity_group = entity["entity_group"]
#
# color = entity_colors.get(entity_group, "#dbeafe") # Default
# darker_color = "#008080"
#
# if "https:" in entity_text:
# text_with_links = text_with_links[
# :start] + f'<span style="background-color: {color}; border-radius: 2px; padding: 2px 4px"><a style="color: {darker_color}" href="{entity_text.split(">")[1].split("<")[0]}">{entity_text.split(">")[1].split("<")[0]}</a> <span style="color: {darker_color}; font-size: 0.8em">{entity_group}</span></span>' + text_with_links[
# end:]
# offset += len(
# f'<span style="background-color: {color}; border-radius: 2px; padding: 2px 4px"><a style="color: {darker_color}" href="{entity_text.split(">")[1].split("<")[0]}">{entity_text.split(">")[1].split("<")[0]}</a> <span style="color: {darker_color}; font-size: 0.8em">{entity_group}</span></span>') - (
# end - start)
# # text_with_links = text_with_links[:start] + f'<span style="background-color: {color}"><a href="{entity_text.split(">")[1].split("<")[0]}">{entity_text.split(">")[1].split("<")[0]}</a></span>' + text_with_links[end:]
# # offset += len(
# # f'<span style="background-color: {color}"><a href="{entity_text.split(">")[1].split("<")[0]}">{entity_text.split(">")[1].split("<")[0]}</a></span>') - (
# # end - start)
# #
# # text_with_links = text_with_links[:start] + entity_text + text_with_links[end:]
# # offset += len(entity_text) - (end - start)
# else:
# text_with_links = text_with_links[
# :start] + f'<span style="background-color: {color}; border-radius: 2px; padding: 2px 4px">{entity_text} <span style="color: {darker_color}; font-size: 0.8em">{entity_group}</span></span>' + text_with_links[end:]
# offset += len(
# f'<span style="background-color: {color}; border-radius: 2px; padding: 2px 4px">{entity_text} <span style="color: {darker_color}; font-size: 0.8em">{entity_group}</span></span>') - (
# end - start)
# # text_with_links = text_with_links[
# # :start] + f'<span style="background-color: {color}">{entity_text}</span>' + text_with_links[
# # end:]
# # offset += len(
# # f'<span style="background-color: {color}">{entity_text}</span>') - (end - start)
html_output = f"<div class='gr-textbox' style='white-space: pre-wrap; overflow-wrap: break-word; padding: 10px; border: 1px solid #ddd; border-radius: 5px; font-family: monospace; font-size: 12px; line-height: 24px;'>{text_with_links}</div>"
return {"text": text, "entities": dict_annotated_combined_NER}, html_output, history.to_dict()
else:
html_output = f"<div class='gr-textbox' style='white-space: pre-wrap; overflow-wrap: break-word; padding: 10px; border: 1px solid #ddd; border-radius: 5px; font-family: monospace; font-size: 12px; line-height: 24px;'>{text}</div>"
return {"text": text, "entities": dict_annotated_combined_NER}, html_output, history.to_dict()
else:
html_output = f"<div class='gr-textbox' style='white-space: pre-wrap; overflow-wrap: break-word; padding: 10px; border: 1px solid #ddd; border-radius: 5px; font-family: monospace; font-size: 12px; line-height: 24px;'>{text}</div>"
return {"text": text, "entities": []}, html_output, history.to_dict()
demo = gr.Interface(
fn=nerBio,
inputs=[
gr.Textbox(label= "Input text", placeholder="Enter text here..."),
gr.CheckboxGroup(models_List, label="ModelsSelection", value=models_List),
gr.CheckboxGroup(categories_List, label="CategoriesSelection", value=categories_List),
gr.Slider(minimum=0, maximum=1.0, step=0.1, label="Score", value=0.5),
gr.Checkbox(label="Enable Named-Entity Linking (NEL)", value=False), #True False
gr.State(value={})
],
outputs=[
gr.HighlightedText(label="Annotated Text"),
gr.HTML(label="Linked Text", show_label=True, visible=True), # use gr.HTML to render the annotated text with links , visible
gr.State()
],
live=True,
title="Named-Entity Recognition (NER) and Linking (NEL)",
description="Select one or more NER models and enter some text to get it processed. Please select also the entity categories you want to extract, as well as the score to use as a threshold for the NER extraction. Finally, select whether you want to perform Named-Entity Linking (NEL) and if you want to enable the cache for this process. ",
examples=examples,
cache_examples=False
)
demo.launch()
#demo.launch(share=True) # Share your demo with just 1 extra parameter