repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
ColBERT | ColBERT-master/colbert/__init__.py | 0 | 0 | 0 | py |
|
ColBERT | ColBERT-master/colbert/rerank.py | import os
import random
from colbert.utils.parser import Arguments
from colbert.utils.runs import Run
from colbert.evaluation.loaders import load_colbert, load_qrels, load_queries, load_topK_pids
from colbert.ranking.reranking import rerank
from colbert.ranking.batch_reranking import batch_rerank
def main():
random.seed(12345)
parser = Arguments(description='Re-ranking over a ColBERT index')
parser.add_model_parameters()
parser.add_model_inference_parameters()
parser.add_reranking_input()
parser.add_index_use_input()
parser.add_argument('--step', dest='step', default=1, type=int)
parser.add_argument('--part-range', dest='part_range', default=None, type=str)
parser.add_argument('--log-scores', dest='log_scores', default=False, action='store_true')
parser.add_argument('--batch', dest='batch', default=False, action='store_true')
parser.add_argument('--depth', dest='depth', default=1000, type=int)
args = parser.parse()
if args.part_range:
part_offset, part_endpos = map(int, args.part_range.split('..'))
args.part_range = range(part_offset, part_endpos)
with Run.context():
args.colbert, args.checkpoint = load_colbert(args)
args.queries = load_queries(args.queries)
args.qrels = load_qrels(args.qrels)
args.topK_pids, args.qrels = load_topK_pids(args.topK, qrels=args.qrels)
args.index_path = os.path.join(args.index_root, args.index_name)
if args.batch:
batch_rerank(args)
else:
rerank(args)
if __name__ == "__main__":
main()
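# A hedged usage sketch -- paths and the index name are hypothetical; the flags are
# the ones registered by the Arguments parser above:
#   python -m colbert.rerank --amp --checkpoint /path/to/colbert.dnn \
#     --queries queries.dev.tsv --topk top1000.dev.tsv --qrels qrels.dev.tsv \
#     --index_root /path/to/indexes --index_name MSMARCO.L2.32x200k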
| 1,609 | 30.568627 | 94 | py |
ColBERT | ColBERT-master/colbert/train.py | import os
import random
import torch
import copy
import colbert.utils.distributed as distributed
from colbert.utils.parser import Arguments
from colbert.utils.runs import Run
from colbert.training.training import train
def main():
parser = Arguments(description='Training ColBERT with <query, positive passage, negative passage> triples.')
parser.add_model_parameters()
parser.add_model_training_parameters()
parser.add_training_input()
args = parser.parse()
assert args.bsize % args.accumsteps == 0, ((args.bsize, args.accumsteps),
"The batch size must be divisible by the number of gradient accumulation steps.")
assert args.query_maxlen <= 512
assert args.doc_maxlen <= 512
args.lazy = args.collection is not None
with Run.context(consider_failed_if_interrupted=False):
train(args)
if __name__ == "__main__":
main()
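# A hedged invocation sketch (file names are hypothetical; flags match the parser setup above):
#   python -m colbert.train --amp --doc_maxlen 180 --mask-punctuation --bsize 32 --accum 1 \
#     --triples triples.train.tsv --root experiments --experiment MSMARCO-psg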
| 929 | 25.571429 | 128 | py |
ColBERT | ColBERT-master/colbert/index.py | import os
import ujson
import random
from colbert.utils.runs import Run
from colbert.utils.parser import Arguments
import colbert.utils.distributed as distributed
from colbert.utils.utils import print_message, create_directory
from colbert.indexing.encoder import CollectionEncoder
def main():
random.seed(12345)
parser = Arguments(description='Precomputing document representations with ColBERT.')
parser.add_model_parameters()
parser.add_model_inference_parameters()
parser.add_indexing_input()
parser.add_argument('--chunksize', dest='chunksize', default=6.0, required=False, type=float) # in GiBs
args = parser.parse()
with Run.context():
args.index_path = os.path.join(args.index_root, args.index_name)
assert not os.path.exists(args.index_path), args.index_path
distributed.barrier(args.rank)
if args.rank < 1:
create_directory(args.index_root)
create_directory(args.index_path)
distributed.barrier(args.rank)
process_idx = max(0, args.rank)
encoder = CollectionEncoder(args, process_idx=process_idx, num_processes=args.nranks)
encoder.encode()
distributed.barrier(args.rank)
# Save metadata.
if args.rank < 1:
metadata_path = os.path.join(args.index_path, 'metadata.json')
print_message("Saving (the following) metadata to", metadata_path, "..")
print(args.input_arguments)
with open(metadata_path, 'w') as output_metadata:
ujson.dump(args.input_arguments.__dict__, output_metadata)
distributed.barrier(args.rank)
if __name__ == "__main__":
main()
# TODO: Add resume functionality
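# A hedged invocation sketch (paths and the index name are hypothetical):
#   python -m colbert.index --amp --checkpoint /path/to/colbert.dnn --doc_maxlen 180 \
#     --collection collection.tsv --index_root /path/to/indexes \
#     --index_name MSMARCO.L2.32x200k --chunksize 6.0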
| 1,730 | 27.85 | 109 | py |
ColBERT | ColBERT-master/colbert/evaluation/ranking_logger.py | import os
from contextlib import contextmanager
from colbert.utils.utils import print_message, NullContextManager
from colbert.utils.runs import Run
class RankingLogger():
def __init__(self, directory, qrels=None, log_scores=False):
self.directory = directory
self.qrels = qrels
self.filename, self.also_save_annotations = None, None
self.log_scores = log_scores
@contextmanager
def context(self, filename, also_save_annotations=False):
assert self.filename is None
assert self.also_save_annotations is None
filename = os.path.join(self.directory, filename)
self.filename, self.also_save_annotations = filename, also_save_annotations
print_message("#> Logging ranked lists to {}".format(self.filename))
with open(filename, 'w') as f:
self.f = f
with (open(filename + '.annotated', 'w') if also_save_annotations else NullContextManager()) as g:
self.g = g
try:
yield self
finally:
pass
def log(self, qid, ranking, is_ranked=True, print_positions=[]):
print_positions = set(print_positions)
f_buffer = []
g_buffer = []
for rank, (score, pid, passage) in enumerate(ranking):
is_relevant = self.qrels and int(pid in self.qrels[qid])
rank = rank+1 if is_ranked else -1
possibly_score = [score] if self.log_scores else []
f_buffer.append('\t'.join([str(x) for x in [qid, pid, rank] + possibly_score]) + "\n")
if self.g:
g_buffer.append('\t'.join([str(x) for x in [qid, pid, rank, is_relevant]]) + "\n")
if rank in print_positions:
prefix = "** " if is_relevant else ""
prefix += str(rank)
print("#> ( QID {} ) ".format(qid) + prefix + ") ", pid, ":", score, ' ', passage)
self.f.write(''.join(f_buffer))
if self.g:
self.g.write(''.join(g_buffer))
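# A minimal usage sketch (values are hypothetical), mirroring how evaluate() drives this class:
#   rl = RankingLogger('/tmp/run', qrels=None, log_scores=True)
#   with rl.context('ranking.tsv') as rlogger:
#       rlogger.log(qid=1, ranking=[(21.5, 7, 'some passage text')], print_positions=[1])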
| 2,062 | 34.568966 | 110 | py |
ColBERT | ColBERT-master/colbert/evaluation/loaders.py | import os
import ujson
import torch
import random
from collections import defaultdict, OrderedDict
from colbert.parameters import DEVICE
from colbert.modeling.colbert import ColBERT
from colbert.utils.utils import print_message, load_checkpoint
from colbert.evaluation.load_model import load_model
from colbert.utils.runs import Run
def load_queries(queries_path):
queries = OrderedDict()
print_message("#> Loading the queries from", queries_path, "...")
with open(queries_path) as f:
for line in f:
qid, query, *_ = line.strip().split('\t')
qid = int(qid)
assert (qid not in queries), ("Query QID", qid, "is repeated!")
queries[qid] = query
print_message("#> Got", len(queries), "queries. All QIDs are unique.\n")
return queries
def load_qrels(qrels_path):
if qrels_path is None:
return None
print_message("#> Loading qrels from", qrels_path, "...")
qrels = OrderedDict()
with open(qrels_path, mode='r', encoding="utf-8") as f:
for line in f:
qid, x, pid, y = map(int, line.strip().split('\t'))
assert x == 0 and y == 1
qrels[qid] = qrels.get(qid, [])
qrels[qid].append(pid)
assert all(len(qrels[qid]) == len(set(qrels[qid])) for qid in qrels)
avg_positive = round(sum(len(qrels[qid]) for qid in qrels) / len(qrels), 2)
print_message("#> Loaded qrels for", len(qrels), "unique queries with",
avg_positive, "positives per query on average.\n")
return qrels
def load_topK(topK_path):
queries = OrderedDict()
topK_docs = OrderedDict()
topK_pids = OrderedDict()
print_message("#> Loading the top-k per query from", topK_path, "...")
with open(topK_path) as f:
for line_idx, line in enumerate(f):
if line_idx and line_idx % (10*1000*1000) == 0:
print(line_idx, end=' ', flush=True)
qid, pid, query, passage = line.split('\t')
qid, pid = int(qid), int(pid)
assert (qid not in queries) or (queries[qid] == query)
queries[qid] = query
topK_docs[qid] = topK_docs.get(qid, [])
topK_docs[qid].append(passage)
topK_pids[qid] = topK_pids.get(qid, [])
topK_pids[qid].append(pid)
print()
assert all(len(topK_pids[qid]) == len(set(topK_pids[qid])) for qid in topK_pids)
Ks = [len(topK_pids[qid]) for qid in topK_pids]
print_message("#> max(Ks) =", max(Ks), ", avg(Ks) =", round(sum(Ks) / len(Ks), 2))
print_message("#> Loaded the top-k per query for", len(queries), "unique queries.\n")
return queries, topK_docs, topK_pids
def load_topK_pids(topK_path, qrels):
topK_pids = defaultdict(list)
topK_positives = defaultdict(list)
print_message("#> Loading the top-k PIDs per query from", topK_path, "...")
with open(topK_path) as f:
for line_idx, line in enumerate(f):
if line_idx and line_idx % (10*1000*1000) == 0:
print(line_idx, end=' ', flush=True)
qid, pid, *rest = line.strip().split('\t')
qid, pid = int(qid), int(pid)
topK_pids[qid].append(pid)
assert len(rest) in [1, 2, 3]
if len(rest) > 1:
*_, label = rest
label = int(label)
assert label in [0, 1]
if label >= 1:
topK_positives[qid].append(pid)
print()
assert all(len(topK_pids[qid]) == len(set(topK_pids[qid])) for qid in topK_pids)
assert all(len(topK_positives[qid]) == len(set(topK_positives[qid])) for qid in topK_positives)
# Make them sets for fast lookups later
topK_positives = {qid: set(topK_positives[qid]) for qid in topK_positives}
Ks = [len(topK_pids[qid]) for qid in topK_pids]
print_message("#> max(Ks) =", max(Ks), ", avg(Ks) =", round(sum(Ks) / len(Ks), 2))
print_message("#> Loaded the top-k per query for", len(topK_pids), "unique queries.\n")
if len(topK_positives) == 0:
topK_positives = None
else:
assert len(topK_pids) >= len(topK_positives)
for qid in set.difference(set(topK_pids.keys()), set(topK_positives.keys())):
topK_positives[qid] = []
assert len(topK_pids) == len(topK_positives)
avg_positive = round(sum(len(topK_positives[qid]) for qid in topK_positives) / len(topK_pids), 2)
print_message("#> Concurrently got annotations for", len(topK_positives), "unique queries with",
avg_positive, "positives per query on average.\n")
assert qrels is None or topK_positives is None, "Cannot have both qrels and an annotated top-K file!"
if topK_positives is None:
topK_positives = qrels
return topK_pids, topK_positives
def load_collection(collection_path):
print_message("#> Loading collection...")
collection = []
with open(collection_path) as f:
for line_idx, line in enumerate(f):
if line_idx % (1000*1000) == 0:
print(f'{line_idx // 1000 // 1000}M', end=' ', flush=True)
pid, passage, *rest = line.strip().split('\t')
assert pid == 'id' or int(pid) == line_idx
if len(rest) >= 1:
title = rest[0]
passage = title + ' | ' + passage
collection.append(passage)
print()
return collection
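# A quick reference for the TSV formats parsed above (inferred from the parsing code):
#   queries (load_queries):       qid \t query [\t ...]
#   qrels (load_qrels):           qid \t 0 \t pid \t 1
#   topK (load_topK):             qid \t pid \t query \t passage
#   topK pids (load_topK_pids):   qid \t pid \t <1-3 extra columns>; with more than one, the last is a 0/1 label
#   collection (load_collection): pid \t passage [\t title [\t ...]]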
def load_colbert(args, do_print=True):
colbert, checkpoint = load_model(args, do_print)
# TODO: If the parameters below were not specified on the command line, their *checkpoint* values should be used.
# I.e., not their purely (i.e., training) default values.
for k in ['query_maxlen', 'doc_maxlen', 'dim', 'similarity', 'amp']:
if 'arguments' in checkpoint and hasattr(args, k):
if k in checkpoint['arguments'] and checkpoint['arguments'][k] != getattr(args, k):
a, b = checkpoint['arguments'][k], getattr(args, k)
Run.warn(f"Got checkpoint['arguments']['{k}'] != args.{k} (i.e., {a} != {b})")
if 'arguments' in checkpoint:
if args.rank < 1:
print(ujson.dumps(checkpoint['arguments'], indent=4))
if do_print:
print('\n')
return colbert, checkpoint
| 6,329 | 31.13198 | 117 | py |
ColBERT | ColBERT-master/colbert/evaluation/load_model.py | import os
import ujson
import torch
import random
from collections import defaultdict, OrderedDict
from colbert.parameters import DEVICE
from colbert.modeling.colbert import ColBERT
from colbert.utils.utils import print_message, load_checkpoint
def load_model(args, do_print=True):
colbert = ColBERT.from_pretrained('bert-base-uncased',
query_maxlen=args.query_maxlen,
doc_maxlen=args.doc_maxlen,
dim=args.dim,
similarity_metric=args.similarity,
mask_punctuation=args.mask_punctuation)
colbert = colbert.to(DEVICE)
print_message("#> Loading model checkpoint.", condition=do_print)
checkpoint = load_checkpoint(args.checkpoint, colbert, do_print=do_print)
colbert.eval()
return colbert, checkpoint
| 919 | 30.724138 | 77 | py |
ColBERT | ColBERT-master/colbert/evaluation/slow.py | import os
def slow_rerank(args, query, pids, passages):
colbert = args.colbert
inference = args.inference
Q = inference.queryFromText([query])
D_ = inference.docFromText(passages, bsize=args.bsize)
scores = colbert.score(Q, D_).cpu()
scores = scores.sort(descending=True)
ranked = scores.indices.tolist()
ranked_scores = scores.values.tolist()
ranked_pids = [pids[position] for position in ranked]
ranked_passages = [passages[position] for position in ranked]
assert len(ranked_pids) == len(set(ranked_pids))
return list(zip(ranked_scores, ranked_pids, ranked_passages))
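# Sketch of a call (hypothetical values; `args` must carry .colbert, .inference and .bsize,
# which colbert/evaluation/ranking.py wires up before calling this):
#   ranking = slow_rerank(args, "what is colbert?", [3, 8], ["passage a", "passage b"])
#   # -> [(score, pid, passage), ...] sorted by descending ColBERT score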
| 626 | 27.5 | 65 | py |
ColBERT | ColBERT-master/colbert/evaluation/metrics.py | import ujson
from collections import defaultdict
from colbert.utils.runs import Run
class Metrics:
def __init__(self, mrr_depths: set, recall_depths: set, success_depths: set, total_queries=None):
self.results = {}
self.mrr_sums = {depth: 0.0 for depth in mrr_depths}
self.recall_sums = {depth: 0.0 for depth in recall_depths}
self.success_sums = {depth: 0.0 for depth in success_depths}
self.total_queries = total_queries
self.max_query_idx = -1
self.num_queries_added = 0
def add(self, query_idx, query_key, ranking, gold_positives):
self.num_queries_added += 1
assert query_key not in self.results
assert len(self.results) <= query_idx
assert len(set(gold_positives)) == len(gold_positives)
assert len(set([pid for _, pid, _ in ranking])) == len(ranking)
self.results[query_key] = ranking
positives = [i for i, (_, pid, _) in enumerate(ranking) if pid in gold_positives]
if len(positives) == 0:
return
for depth in self.mrr_sums:
first_positive = positives[0]
self.mrr_sums[depth] += (1.0 / (first_positive+1.0)) if first_positive < depth else 0.0
for depth in self.success_sums:
first_positive = positives[0]
self.success_sums[depth] += 1.0 if first_positive < depth else 0.0
for depth in self.recall_sums:
num_positives_up_to_depth = len([pos for pos in positives if pos < depth])
self.recall_sums[depth] += num_positives_up_to_depth / len(gold_positives)
def print_metrics(self, query_idx):
for depth in sorted(self.mrr_sums):
print("MRR@" + str(depth), "=", self.mrr_sums[depth] / (query_idx+1.0))
for depth in sorted(self.success_sums):
print("Success@" + str(depth), "=", self.success_sums[depth] / (query_idx+1.0))
for depth in sorted(self.recall_sums):
print("Recall@" + str(depth), "=", self.recall_sums[depth] / (query_idx+1.0))
def log(self, query_idx):
assert query_idx >= self.max_query_idx
self.max_query_idx = query_idx
Run.log_metric("ranking/max_query_idx", query_idx, query_idx)
Run.log_metric("ranking/num_queries_added", self.num_queries_added, query_idx)
for depth in sorted(self.mrr_sums):
score = self.mrr_sums[depth] / (query_idx+1.0)
Run.log_metric("ranking/MRR." + str(depth), score, query_idx)
for depth in sorted(self.success_sums):
score = self.success_sums[depth] / (query_idx+1.0)
Run.log_metric("ranking/Success." + str(depth), score, query_idx)
for depth in sorted(self.recall_sums):
score = self.recall_sums[depth] / (query_idx+1.0)
Run.log_metric("ranking/Recall." + str(depth), score, query_idx)
def output_final_metrics(self, path, query_idx, num_queries):
assert query_idx + 1 == num_queries
assert num_queries == self.total_queries
if self.max_query_idx < query_idx:
self.log(query_idx)
self.print_metrics(query_idx)
output = defaultdict(dict)
for depth in sorted(self.mrr_sums):
score = self.mrr_sums[depth] / (query_idx+1.0)
output['mrr'][depth] = score
for depth in sorted(self.success_sums):
score = self.success_sums[depth] / (query_idx+1.0)
output['success'][depth] = score
for depth in sorted(self.recall_sums):
score = self.recall_sums[depth] / (query_idx+1.0)
output['recall'][depth] = score
with open(path, 'w') as f:
ujson.dump(output, f, indent=4)
f.write('\n')
def evaluate_recall(qrels, queries, topK_pids):
if qrels is None:
return
assert set(qrels.keys()) == set(queries.keys())
recall_at_k = [len(set.intersection(set(qrels[qid]), set(topK_pids[qid]))) / max(1.0, len(qrels[qid]))
for qid in qrels]
recall_at_k = sum(recall_at_k) / len(qrels)
recall_at_k = round(recall_at_k, 3)
print("Recall @ maximum depth =", recall_at_k)
# TODO: If implicit qrels are used (for re-ranking), warn if a recall metric is requested + add an asterisk to output.
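# Worked example of the accounting in Metrics.add: if the first gold-positive passage
# sits at 0-based position 4 (i.e., rank 5), then MRR@10 gains 1/5, Success@5 gains 1.0
# (since 4 < 5), and each Recall@k gains (positives at positions < k) / len(gold_positives).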
| 4,304 | 36.434783 | 118 | py |
ColBERT | ColBERT-master/colbert/evaluation/__init__.py | 0 | 0 | 0 | py |
|
ColBERT | ColBERT-master/colbert/evaluation/ranking.py | import os
import random
import time
import torch
import torch.nn as nn
from itertools import accumulate
from math import ceil
from colbert.utils.runs import Run
from colbert.utils.utils import print_message
from colbert.evaluation.metrics import Metrics
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.modeling.inference import ModelInference
from colbert.evaluation.slow import slow_rerank
def evaluate(args):
args.inference = ModelInference(args.colbert, amp=args.amp)
qrels, queries, topK_pids = args.qrels, args.queries, args.topK_pids
depth = args.depth
collection = args.collection
if collection is None:
topK_docs = args.topK_docs
def qid2passages(qid):
if collection is not None:
return [collection[pid] for pid in topK_pids[qid][:depth]]
else:
return topK_docs[qid][:depth]
metrics = Metrics(mrr_depths={10, 100}, recall_depths={50, 200, 1000},
success_depths={5, 10, 20, 50, 100, 1000},
total_queries=len(queries))
ranking_logger = RankingLogger(Run.path, qrels=qrels)
args.milliseconds = []
with ranking_logger.context('ranking.tsv', also_save_annotations=(qrels is not None)) as rlogger:
with torch.no_grad():
keys = sorted(list(queries.keys()))
random.shuffle(keys)
for query_idx, qid in enumerate(keys):
query = queries[qid]
print_message(query_idx, qid, query, '\n')
if qrels and args.shortcircuit and len(set.intersection(set(qrels[qid]), set(topK_pids[qid]))) == 0:
continue
ranking = slow_rerank(args, query, topK_pids[qid], qid2passages(qid))
                rlogger.log(qid, ranking, print_positions=[0, 1])  # pass by keyword; positionally, [0, 1] would bind to is_ranked
if qrels:
metrics.add(query_idx, qid, ranking, qrels[qid])
for i, (score, pid, passage) in enumerate(ranking):
if pid in qrels[qid]:
print("\n#> Found", pid, "at position", i+1, "with score", score)
print(passage)
break
metrics.print_metrics(query_idx)
metrics.log(query_idx)
print_message("#> checkpoint['batch'] =", args.checkpoint['batch'], '\n')
print("rlogger.filename =", rlogger.filename)
if len(args.milliseconds) > 1:
print('Slow-Ranking Avg Latency =', sum(args.milliseconds[1:]) / len(args.milliseconds[1:]))
print("\n\n")
print("\n\n")
# print('Avg Latency =', sum(args.milliseconds[1:]) / len(args.milliseconds[1:]))
print("\n\n")
print('\n\n')
if qrels:
assert query_idx + 1 == len(keys) == len(set(keys))
metrics.output_final_metrics(os.path.join(Run.path, 'ranking.metrics'), query_idx, len(queries))
print('\n\n')
| 2,993 | 32.640449 | 116 | py |
ColBERT | ColBERT-master/colbert/indexing/loaders.py | import os
import torch
import ujson
from math import ceil
from itertools import accumulate
from colbert.utils.utils import print_message
def get_parts(directory):
extension = '.pt'
parts = sorted([int(filename[: -1 * len(extension)]) for filename in os.listdir(directory)
if filename.endswith(extension)])
assert list(range(len(parts))) == parts, parts
# Integer-sortedness matters.
parts_paths = [os.path.join(directory, '{}{}'.format(filename, extension)) for filename in parts]
samples_paths = [os.path.join(directory, '{}.sample'.format(filename)) for filename in parts]
return parts, parts_paths, samples_paths
def load_doclens(directory, flatten=True):
parts, _, _ = get_parts(directory)
doclens_filenames = [os.path.join(directory, 'doclens.{}.json'.format(filename)) for filename in parts]
all_doclens = [ujson.load(open(filename)) for filename in doclens_filenames]
if flatten:
all_doclens = [x for sub_doclens in all_doclens for x in sub_doclens]
return all_doclens
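# Directory layout this module expects (as written by CollectionEncoder):
#   0.pt, 1.pt, ...            embedding parts, integer-sorted by filename
#   0.sample, 1.sample, ...    subsampled embeddings used for FAISS training
#   doclens.0.json, ...        per-part document lengths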
| 1,064 | 29.428571 | 107 | py |
ColBERT | ColBERT-master/colbert/indexing/faiss.py | import os
import math
import faiss
import torch
import numpy as np
import threading
import queue
from colbert.utils.utils import print_message, grouper
from colbert.indexing.loaders import get_parts
from colbert.indexing.index_manager import load_index_part
from colbert.indexing.faiss_index import FaissIndex
def get_faiss_index_name(args, offset=None, endpos=None):
partitions_info = '' if args.partitions is None else f'.{args.partitions}'
range_info = '' if offset is None else f'.{offset}-{endpos}'
return f'ivfpq{partitions_info}{range_info}.faiss'
def load_sample(samples_paths, sample_fraction=None):
sample = []
for filename in samples_paths:
print_message(f"#> Loading {filename} ...")
part = load_index_part(filename)
if sample_fraction:
part = part[torch.randint(0, high=part.size(0), size=(int(part.size(0) * sample_fraction),))]
sample.append(part)
sample = torch.cat(sample).float().numpy()
print("#> Sample has shape", sample.shape)
return sample
def prepare_faiss_index(slice_samples_paths, partitions, sample_fraction=None):
training_sample = load_sample(slice_samples_paths, sample_fraction=sample_fraction)
dim = training_sample.shape[-1]
index = FaissIndex(dim, partitions)
print_message("#> Training with the vectors...")
index.train(training_sample)
print_message("Done training!\n")
return index
SPAN = 3
def index_faiss(args):
print_message("#> Starting..")
parts, parts_paths, samples_paths = get_parts(args.index_path)
if args.sample is not None:
assert args.sample, args.sample
print_message(f"#> Training with {round(args.sample * 100.0, 1)}% of *all* embeddings (provided --sample).")
samples_paths = parts_paths
num_parts_per_slice = math.ceil(len(parts) / args.slices)
for slice_idx, part_offset in enumerate(range(0, len(parts), num_parts_per_slice)):
part_endpos = min(part_offset + num_parts_per_slice, len(parts))
slice_parts_paths = parts_paths[part_offset:part_endpos]
slice_samples_paths = samples_paths[part_offset:part_endpos]
if args.slices == 1:
faiss_index_name = get_faiss_index_name(args)
else:
faiss_index_name = get_faiss_index_name(args, offset=part_offset, endpos=part_endpos)
output_path = os.path.join(args.index_path, faiss_index_name)
print_message(f"#> Processing slice #{slice_idx+1} of {args.slices} (range {part_offset}..{part_endpos}).")
print_message(f"#> Will write to {output_path}.")
assert not os.path.exists(output_path), output_path
index = prepare_faiss_index(slice_samples_paths, args.partitions, args.sample)
loaded_parts = queue.Queue(maxsize=1)
def _loader_thread(thread_parts_paths):
for filenames in grouper(thread_parts_paths, SPAN, fillvalue=None):
sub_collection = [load_index_part(filename) for filename in filenames if filename is not None]
sub_collection = torch.cat(sub_collection)
sub_collection = sub_collection.float().numpy()
loaded_parts.put(sub_collection)
thread = threading.Thread(target=_loader_thread, args=(slice_parts_paths,))
thread.start()
print_message("#> Indexing the vectors...")
for filenames in grouper(slice_parts_paths, SPAN, fillvalue=None):
print_message("#> Loading", filenames, "(from queue)...")
sub_collection = loaded_parts.get()
print_message("#> Processing a sub_collection with shape", sub_collection.shape)
index.add(sub_collection)
print_message("Done indexing!")
index.save(output_path)
print_message(f"\n\nDone! All complete (for slice #{slice_idx+1} of {args.slices})!")
thread.join()
| 3,899 | 32.333333 | 116 | py |
ColBERT | ColBERT-master/colbert/indexing/index_manager.py | import torch
import faiss
import numpy as np
from colbert.utils.utils import print_message
class IndexManager():
def __init__(self, dim):
self.dim = dim
def save(self, tensor, path_prefix):
torch.save(tensor, path_prefix)
def load_index_part(filename, verbose=True):
part = torch.load(filename)
if type(part) == list: # for backward compatibility
part = torch.cat(part)
return part
| 435 | 17.956522 | 56 | py |
ColBERT | ColBERT-master/colbert/indexing/encoder.py | import os
import time
import torch
import ujson
import numpy as np
import itertools
import threading
import queue
from colbert.modeling.inference import ModelInference
from colbert.evaluation.loaders import load_colbert
from colbert.utils.utils import print_message
from colbert.indexing.index_manager import IndexManager
class CollectionEncoder():
def __init__(self, args, process_idx, num_processes):
self.args = args
self.collection = args.collection
self.process_idx = process_idx
self.num_processes = num_processes
assert 0.5 <= args.chunksize <= 128.0
max_bytes_per_file = args.chunksize * (1024*1024*1024)
max_bytes_per_doc = (self.args.doc_maxlen * self.args.dim * 2.0)
# Determine subset sizes for output
minimum_subset_size = 10_000
maximum_subset_size = max_bytes_per_file / max_bytes_per_doc
maximum_subset_size = max(minimum_subset_size, maximum_subset_size)
self.possible_subset_sizes = [int(maximum_subset_size)]
self.print_main("#> Local args.bsize =", args.bsize)
self.print_main("#> args.index_root =", args.index_root)
self.print_main(f"#> self.possible_subset_sizes = {self.possible_subset_sizes}")
self._load_model()
self.indexmgr = IndexManager(args.dim)
self.iterator = self._initialize_iterator()
def _initialize_iterator(self):
return open(self.collection)
def _saver_thread(self):
for args in iter(self.saver_queue.get, None):
self._save_batch(*args)
def _load_model(self):
self.colbert, self.checkpoint = load_colbert(self.args, do_print=(self.process_idx == 0))
self.colbert = self.colbert.cuda()
self.colbert.eval()
self.inference = ModelInference(self.colbert, amp=self.args.amp)
def encode(self):
self.saver_queue = queue.Queue(maxsize=3)
thread = threading.Thread(target=self._saver_thread)
thread.start()
t0 = time.time()
local_docs_processed = 0
for batch_idx, (offset, lines, owner) in enumerate(self._batch_passages(self.iterator)):
if owner != self.process_idx:
continue
t1 = time.time()
batch = self._preprocess_batch(offset, lines)
embs, doclens = self._encode_batch(batch_idx, batch)
t2 = time.time()
self.saver_queue.put((batch_idx, embs, offset, doclens))
t3 = time.time()
local_docs_processed += len(lines)
overall_throughput = compute_throughput(local_docs_processed, t0, t3)
this_encoding_throughput = compute_throughput(len(lines), t1, t2)
this_saving_throughput = compute_throughput(len(lines), t2, t3)
self.print(f'#> Completed batch #{batch_idx} (starting at passage #{offset}) \t\t'
f'Passages/min: {overall_throughput} (overall), ',
f'{this_encoding_throughput} (this encoding), ',
f'{this_saving_throughput} (this saving)')
self.saver_queue.put(None)
self.print("#> Joining saver thread.")
thread.join()
def _batch_passages(self, fi):
"""
Must use the same seed across processes!
"""
np.random.seed(0)
offset = 0
for owner in itertools.cycle(range(self.num_processes)):
batch_size = np.random.choice(self.possible_subset_sizes)
L = [line for _, line in zip(range(batch_size), fi)]
if len(L) == 0:
break # EOF
yield (offset, L, owner)
offset += len(L)
if len(L) < batch_size:
break # EOF
self.print("[NOTE] Done with local share.")
return
def _preprocess_batch(self, offset, lines):
endpos = offset + len(lines)
batch = []
for line_idx, line in zip(range(offset, endpos), lines):
line_parts = line.strip().split('\t')
pid, passage, *other = line_parts
assert len(passage) >= 1
if len(other) >= 1:
title, *_ = other
passage = title + ' | ' + passage
batch.append(passage)
assert pid == 'id' or int(pid) == line_idx
return batch
def _encode_batch(self, batch_idx, batch):
with torch.no_grad():
embs = self.inference.docFromText(batch, bsize=self.args.bsize, keep_dims=False)
assert type(embs) is list
assert len(embs) == len(batch)
local_doclens = [d.size(0) for d in embs]
embs = torch.cat(embs)
return embs, local_doclens
def _save_batch(self, batch_idx, embs, offset, doclens):
start_time = time.time()
output_path = os.path.join(self.args.index_path, "{}.pt".format(batch_idx))
output_sample_path = os.path.join(self.args.index_path, "{}.sample".format(batch_idx))
doclens_path = os.path.join(self.args.index_path, 'doclens.{}.json'.format(batch_idx))
# Save the embeddings.
self.indexmgr.save(embs, output_path)
self.indexmgr.save(embs[torch.randint(0, high=embs.size(0), size=(embs.size(0) // 20,))], output_sample_path)
# Save the doclens.
with open(doclens_path, 'w') as output_doclens:
ujson.dump(doclens, output_doclens)
throughput = compute_throughput(len(doclens), start_time, time.time())
self.print_main("#> Saved batch #{} to {} \t\t".format(batch_idx, output_path),
"Saving Throughput =", throughput, "passages per minute.\n")
def print(self, *args):
print_message("[" + str(self.process_idx) + "]", "\t\t", *args)
def print_main(self, *args):
if self.process_idx == 0:
self.print(*args)
def compute_throughput(size, t0, t1):
throughput = size / (t1 - t0) * 60
if throughput > 1000 * 1000:
throughput = throughput / (1000*1000)
throughput = round(throughput, 1)
return '{}M'.format(throughput)
throughput = throughput / (1000)
throughput = round(throughput, 1)
return '{}k'.format(throughput)
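# Worked example: compute_throughput(120_000, t0, t0 + 60.0) -> '120.0k'
# (120,000 passages in one minute; rates above one million get an 'M' suffix instead).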
| 6,247 | 32.234043 | 117 | py |
ColBERT | ColBERT-master/colbert/indexing/faiss_index_gpu.py | """
Heavily based on: https://github.com/facebookresearch/faiss/blob/master/benchs/bench_gpu_1bn.py
"""
import sys
import time
import math
import faiss
import torch
import numpy as np
from colbert.utils.utils import print_message
class FaissIndexGPU():
def __init__(self):
self.ngpu = faiss.get_num_gpus()
if self.ngpu == 0:
return
self.tempmem = 1 << 33
self.max_add_per_gpu = 1 << 25
self.max_add = self.max_add_per_gpu * self.ngpu
self.add_batch_size = 65536
self.gpu_resources = self._prepare_gpu_resources()
def _prepare_gpu_resources(self):
print_message(f"Preparing resources for {self.ngpu} GPUs.")
gpu_resources = []
for _ in range(self.ngpu):
res = faiss.StandardGpuResources()
if self.tempmem >= 0:
res.setTempMemory(self.tempmem)
gpu_resources.append(res)
return gpu_resources
def _make_vres_vdev(self):
"""
return vectors of device ids and resources useful for gpu_multiple
"""
assert self.ngpu > 0
vres = faiss.GpuResourcesVector()
vdev = faiss.IntVector()
for i in range(self.ngpu):
vdev.push_back(i)
vres.push_back(self.gpu_resources[i])
return vres, vdev
def training_initialize(self, index, quantizer):
"""
The index and quantizer should be owned by caller.
"""
assert self.ngpu > 0
s = time.time()
self.index_ivf = faiss.extract_index_ivf(index)
self.clustering_index = faiss.index_cpu_to_all_gpus(quantizer)
self.index_ivf.clustering_index = self.clustering_index
print(time.time() - s)
def training_finalize(self):
assert self.ngpu > 0
s = time.time()
self.index_ivf.clustering_index = faiss.index_gpu_to_cpu(self.index_ivf.clustering_index)
print(time.time() - s)
def adding_initialize(self, index):
"""
The index should be owned by caller.
"""
assert self.ngpu > 0
self.co = faiss.GpuMultipleClonerOptions()
self.co.useFloat16 = True
self.co.useFloat16CoarseQuantizer = False
self.co.usePrecomputed = False
self.co.indicesOptions = faiss.INDICES_CPU
self.co.verbose = True
self.co.reserveVecs = self.max_add
self.co.shard = True
assert self.co.shard_type in (0, 1, 2)
self.vres, self.vdev = self._make_vres_vdev()
self.gpu_index = faiss.index_cpu_to_gpu_multiple(self.vres, self.vdev, index, self.co)
def add(self, index, data, offset):
assert self.ngpu > 0
t0 = time.time()
nb = data.shape[0]
for i0 in range(0, nb, self.add_batch_size):
i1 = min(i0 + self.add_batch_size, nb)
xs = data[i0:i1]
self.gpu_index.add_with_ids(xs, np.arange(offset+i0, offset+i1))
if self.max_add > 0 and self.gpu_index.ntotal > self.max_add:
self._flush_to_cpu(index, nb, offset)
print('\r%d/%d (%.3f s) ' % (i0, nb, time.time() - t0), end=' ')
sys.stdout.flush()
if self.gpu_index.ntotal > 0:
self._flush_to_cpu(index, nb, offset)
assert index.ntotal == offset+nb, (index.ntotal, offset+nb, offset, nb)
print(f"add(.) time: %.3f s \t\t--\t\t index.ntotal = {index.ntotal}" % (time.time() - t0))
def _flush_to_cpu(self, index, nb, offset):
print("Flush indexes to CPU")
for i in range(self.ngpu):
index_src_gpu = faiss.downcast_index(self.gpu_index if self.ngpu == 1 else self.gpu_index.at(i))
index_src = faiss.index_gpu_to_cpu(index_src_gpu)
index_src.copy_subset_to(index, 0, offset, offset+nb)
index_src_gpu.reset()
index_src_gpu.reserveMemory(self.max_add)
if self.ngpu > 1:
try:
self.gpu_index.sync_with_shard_indexes()
except:
self.gpu_index.syncWithSubIndexes()
| 4,108 | 28.561151 | 108 | py |
ColBERT | ColBERT-master/colbert/indexing/faiss_index.py | import sys
import time
import math
import faiss
import torch
import numpy as np
from colbert.indexing.faiss_index_gpu import FaissIndexGPU
from colbert.utils.utils import print_message
class FaissIndex():
def __init__(self, dim, partitions):
self.dim = dim
self.partitions = partitions
self.gpu = FaissIndexGPU()
self.quantizer, self.index = self._create_index()
self.offset = 0
def _create_index(self):
quantizer = faiss.IndexFlatL2(self.dim) # faiss.IndexHNSWFlat(dim, 32)
index = faiss.IndexIVFPQ(quantizer, self.dim, self.partitions, 16, 8)
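        # 16 sub-quantizers at 8 bits each => roughly 16 bytes per stored vector
        # (plus inverted-list overhead), the usual FAISS IVFPQ compression trade-off.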
return quantizer, index
def train(self, train_data):
print_message(f"#> Training now (using {self.gpu.ngpu} GPUs)...")
if self.gpu.ngpu > 0:
self.gpu.training_initialize(self.index, self.quantizer)
s = time.time()
self.index.train(train_data)
print(time.time() - s)
if self.gpu.ngpu > 0:
self.gpu.training_finalize()
def add(self, data):
print_message(f"Add data with shape {data.shape} (offset = {self.offset})..")
if self.gpu.ngpu > 0 and self.offset == 0:
self.gpu.adding_initialize(self.index)
if self.gpu.ngpu > 0:
self.gpu.add(self.index, data, self.offset)
else:
self.index.add(data)
self.offset += data.shape[0]
def save(self, output_path):
print_message(f"Writing index to {output_path} ...")
self.index.nprobe = 10 # just a default
faiss.write_index(self.index, output_path)
| 1,605 | 26.220339 | 85 | py |
ColBERT | ColBERT-master/colbert/indexing/__init__.py | 0 | 0 | 0 | py |
|
ColBERT | ColBERT-master/colbert/training/eager_batcher.py | import os
import ujson
from functools import partial
from colbert.utils.utils import print_message
from colbert.modeling.tokenization import QueryTokenizer, DocTokenizer, tensorize_triples
from colbert.utils.runs import Run
class EagerBatcher():
def __init__(self, args, rank=0, nranks=1):
self.rank, self.nranks = rank, nranks
self.bsize, self.accumsteps = args.bsize, args.accumsteps
self.query_tokenizer = QueryTokenizer(args.query_maxlen)
self.doc_tokenizer = DocTokenizer(args.doc_maxlen)
self.tensorize_triples = partial(tensorize_triples, self.query_tokenizer, self.doc_tokenizer)
self.triples_path = args.triples
self._reset_triples()
def _reset_triples(self):
self.reader = open(self.triples_path, mode='r', encoding="utf-8")
self.position = 0
def __iter__(self):
return self
def __next__(self):
queries, positives, negatives = [], [], []
for line_idx, line in zip(range(self.bsize * self.nranks), self.reader):
if (self.position + line_idx) % self.nranks != self.rank:
continue
query, pos, neg = line.strip().split('\t')
queries.append(query)
positives.append(pos)
negatives.append(neg)
self.position += line_idx + 1
if len(queries) < self.bsize:
raise StopIteration
return self.collate(queries, positives, negatives)
def collate(self, queries, positives, negatives):
assert len(queries) == len(positives) == len(negatives) == self.bsize
return self.tensorize_triples(queries, positives, negatives, self.bsize // self.accumsteps)
def skip_to_batch(self, batch_idx, intended_batch_size):
self._reset_triples()
Run.warn(f'Skipping to batch #{batch_idx} (with intended_batch_size = {intended_batch_size}) for training.')
_ = [self.reader.readline() for _ in range(batch_idx * intended_batch_size)]
return None
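# Each line of args.triples is expected to read "query \t positive_passage \t negative_passage"
# (raw text, not IDs); the LazyBatcher below is the ID-based counterpart.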
| 2,018 | 31.047619 | 116 | py |
ColBERT | ColBERT-master/colbert/training/lazy_batcher.py | import os
import ujson
from functools import partial
from colbert.utils.utils import print_message
from colbert.modeling.tokenization import QueryTokenizer, DocTokenizer, tensorize_triples
from colbert.utils.runs import Run
class LazyBatcher():
def __init__(self, args, rank=0, nranks=1):
self.bsize, self.accumsteps = args.bsize, args.accumsteps
self.query_tokenizer = QueryTokenizer(args.query_maxlen)
self.doc_tokenizer = DocTokenizer(args.doc_maxlen)
self.tensorize_triples = partial(tensorize_triples, self.query_tokenizer, self.doc_tokenizer)
self.position = 0
self.triples = self._load_triples(args.triples, rank, nranks)
self.queries = self._load_queries(args.queries)
self.collection = self._load_collection(args.collection)
def _load_triples(self, path, rank, nranks):
"""
NOTE: For distributed sampling, this isn't equivalent to perfectly uniform sampling.
In particular, each subset is perfectly represented in every batch! However, since we never
repeat passes over the data, we never repeat any particular triple, and the split across
nodes is random (since the underlying file is pre-shuffled), there's no concern here.
"""
print_message("#> Loading triples...")
triples = []
with open(path) as f:
for line_idx, line in enumerate(f):
if line_idx % nranks == rank:
qid, pos, neg = ujson.loads(line)
triples.append((qid, pos, neg))
return triples
def _load_queries(self, path):
print_message("#> Loading queries...")
queries = {}
with open(path) as f:
for line in f:
qid, query = line.strip().split('\t')
qid = int(qid)
queries[qid] = query
return queries
def _load_collection(self, path):
print_message("#> Loading collection...")
collection = []
with open(path) as f:
for line_idx, line in enumerate(f):
pid, passage, title, *_ = line.strip().split('\t')
assert pid == 'id' or int(pid) == line_idx
passage = title + ' | ' + passage
collection.append(passage)
return collection
def __iter__(self):
return self
def __len__(self):
return len(self.triples)
def __next__(self):
offset, endpos = self.position, min(self.position + self.bsize, len(self.triples))
self.position = endpos
if offset + self.bsize > len(self.triples):
raise StopIteration
queries, positives, negatives = [], [], []
for position in range(offset, endpos):
query, pos, neg = self.triples[position]
query, pos, neg = self.queries[query], self.collection[pos], self.collection[neg]
queries.append(query)
positives.append(pos)
negatives.append(neg)
return self.collate(queries, positives, negatives)
def collate(self, queries, positives, negatives):
assert len(queries) == len(positives) == len(negatives) == self.bsize
return self.tensorize_triples(queries, positives, negatives, self.bsize // self.accumsteps)
def skip_to_batch(self, batch_idx, intended_batch_size):
Run.warn(f'Skipping to batch #{batch_idx} (with intended_batch_size = {intended_batch_size}) for training.')
self.position = intended_batch_size * batch_idx
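# Here each line of args.triples is a JSON array "[qid, positive_pid, negative_pid]",
# resolved against the queries and collection TSVs loaded in __init__.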
| 3,552 | 33.163462 | 116 | py |
ColBERT | ColBERT-master/colbert/training/training.py | import os
import random
import time
import torch
import torch.nn as nn
import numpy as np
from transformers import AdamW
from colbert.utils.runs import Run
from colbert.utils.amp import MixedPrecisionManager
from colbert.training.lazy_batcher import LazyBatcher
from colbert.training.eager_batcher import EagerBatcher
from colbert.parameters import DEVICE
from colbert.modeling.colbert import ColBERT
from colbert.utils.utils import print_message
from colbert.training.utils import print_progress, manage_checkpoints
def train(args):
random.seed(12345)
np.random.seed(12345)
torch.manual_seed(12345)
if args.distributed:
torch.cuda.manual_seed_all(12345)
if args.distributed:
assert args.bsize % args.nranks == 0, (args.bsize, args.nranks)
assert args.accumsteps == 1
args.bsize = args.bsize // args.nranks
print("Using args.bsize =", args.bsize, "(per process) and args.accumsteps =", args.accumsteps)
if args.lazy:
reader = LazyBatcher(args, (0 if args.rank == -1 else args.rank), args.nranks)
else:
reader = EagerBatcher(args, (0 if args.rank == -1 else args.rank), args.nranks)
if args.rank not in [-1, 0]:
torch.distributed.barrier()
colbert = ColBERT.from_pretrained('bert-base-uncased',
query_maxlen=args.query_maxlen,
doc_maxlen=args.doc_maxlen,
dim=args.dim,
similarity_metric=args.similarity,
mask_punctuation=args.mask_punctuation)
if args.checkpoint is not None:
assert args.resume_optimizer is False, "TODO: This would mean reload optimizer too."
print_message(f"#> Starting from checkpoint {args.checkpoint} -- but NOT the optimizer!")
checkpoint = torch.load(args.checkpoint, map_location='cpu')
try:
colbert.load_state_dict(checkpoint['model_state_dict'])
except:
print_message("[WARNING] Loading checkpoint with strict=False")
colbert.load_state_dict(checkpoint['model_state_dict'], strict=False)
if args.rank == 0:
torch.distributed.barrier()
colbert = colbert.to(DEVICE)
colbert.train()
if args.distributed:
colbert = torch.nn.parallel.DistributedDataParallel(colbert, device_ids=[args.rank],
output_device=args.rank,
find_unused_parameters=True)
optimizer = AdamW(filter(lambda p: p.requires_grad, colbert.parameters()), lr=args.lr, eps=1e-8)
optimizer.zero_grad()
amp = MixedPrecisionManager(args.amp)
criterion = nn.CrossEntropyLoss()
labels = torch.zeros(args.bsize, dtype=torch.long, device=DEVICE)
start_time = time.time()
train_loss = 0.0
start_batch_idx = 0
if args.resume:
assert args.checkpoint is not None
start_batch_idx = checkpoint['batch']
reader.skip_to_batch(start_batch_idx, checkpoint['arguments']['bsize'])
for batch_idx, BatchSteps in zip(range(start_batch_idx, args.maxsteps), reader):
this_batch_loss = 0.0
for queries, passages in BatchSteps:
with amp.context():
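                # The forward pass scores the per-step batch of positives followed by
                # the matching negatives; view(2, -1).permute(1, 0) pairs them into
                # (B, 2) rows with the positive in column 0, so CrossEntropyLoss
                # against all-zero labels pushes each positive above its negative.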
scores = colbert(queries, passages).view(2, -1).permute(1, 0)
loss = criterion(scores, labels[:scores.size(0)])
loss = loss / args.accumsteps
if args.rank < 1:
print_progress(scores)
amp.backward(loss)
train_loss += loss.item()
this_batch_loss += loss.item()
amp.step(colbert, optimizer)
if args.rank < 1:
avg_loss = train_loss / (batch_idx+1)
num_examples_seen = (batch_idx - start_batch_idx) * args.bsize * args.nranks
elapsed = float(time.time() - start_time)
log_to_mlflow = (batch_idx % 20 == 0)
Run.log_metric('train/avg_loss', avg_loss, step=batch_idx, log_to_mlflow=log_to_mlflow)
Run.log_metric('train/batch_loss', this_batch_loss, step=batch_idx, log_to_mlflow=log_to_mlflow)
Run.log_metric('train/examples', num_examples_seen, step=batch_idx, log_to_mlflow=log_to_mlflow)
Run.log_metric('train/throughput', num_examples_seen / elapsed, step=batch_idx, log_to_mlflow=log_to_mlflow)
print_message(batch_idx, avg_loss)
manage_checkpoints(args, colbert, optimizer, batch_idx+1)
| 4,585 | 35.983871 | 120 | py |
ColBERT | ColBERT-master/colbert/training/utils.py | import os
import torch
from colbert.utils.runs import Run
from colbert.utils.utils import print_message, save_checkpoint
from colbert.parameters import SAVED_CHECKPOINTS
def print_progress(scores):
positive_avg, negative_avg = round(scores[:, 0].mean().item(), 2), round(scores[:, 1].mean().item(), 2)
print("#>>> ", positive_avg, negative_avg, '\t\t|\t\t', positive_avg - negative_avg)
def manage_checkpoints(args, colbert, optimizer, batch_idx):
arguments = args.input_arguments.__dict__
path = os.path.join(Run.path, 'checkpoints')
if not os.path.exists(path):
os.mkdir(path)
if batch_idx % 2000 == 0:
name = os.path.join(path, "colbert.dnn")
save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)
if batch_idx in SAVED_CHECKPOINTS:
name = os.path.join(path, "colbert-{}.dnn".format(batch_idx))
save_checkpoint(name, 0, batch_idx, colbert, optimizer, arguments)
| 956 | 32 | 107 | py |
ColBERT | ColBERT-master/colbert/training/__init__.py | 0 | 0 | 0 | py |
|
ColBERT | ColBERT-master/colbert/utils/parser.py | import os
import copy
import faiss
from argparse import ArgumentParser
import colbert.utils.distributed as distributed
from colbert.utils.runs import Run
from colbert.utils.utils import print_message, timestamp, create_directory
class Arguments():
def __init__(self, description):
self.parser = ArgumentParser(description=description)
self.checks = []
self.add_argument('--root', dest='root', default='experiments')
self.add_argument('--experiment', dest='experiment', default='dirty')
self.add_argument('--run', dest='run', default=Run.name)
self.add_argument('--local_rank', dest='rank', default=-1, type=int)
def add_model_parameters(self):
# Core Arguments
self.add_argument('--similarity', dest='similarity', default='cosine', choices=['cosine', 'l2'])
self.add_argument('--dim', dest='dim', default=128, type=int)
self.add_argument('--query_maxlen', dest='query_maxlen', default=32, type=int)
self.add_argument('--doc_maxlen', dest='doc_maxlen', default=180, type=int)
# Filtering-related Arguments
self.add_argument('--mask-punctuation', dest='mask_punctuation', default=False, action='store_true')
def add_model_training_parameters(self):
# NOTE: Providing a checkpoint is one thing, --resume is another, --resume_optimizer is yet another.
self.add_argument('--resume', dest='resume', default=False, action='store_true')
self.add_argument('--resume_optimizer', dest='resume_optimizer', default=False, action='store_true')
self.add_argument('--checkpoint', dest='checkpoint', default=None, required=False)
self.add_argument('--lr', dest='lr', default=3e-06, type=float)
self.add_argument('--maxsteps', dest='maxsteps', default=400000, type=int)
self.add_argument('--bsize', dest='bsize', default=32, type=int)
self.add_argument('--accum', dest='accumsteps', default=2, type=int)
self.add_argument('--amp', dest='amp', default=False, action='store_true')
def add_model_inference_parameters(self):
self.add_argument('--checkpoint', dest='checkpoint', required=True)
self.add_argument('--bsize', dest='bsize', default=128, type=int)
self.add_argument('--amp', dest='amp', default=False, action='store_true')
def add_training_input(self):
self.add_argument('--triples', dest='triples', required=True)
self.add_argument('--queries', dest='queries', default=None)
self.add_argument('--collection', dest='collection', default=None)
def check_training_input(args):
            assert (args.collection is None) == (args.queries is None), \
                "For training, both (or neither) --collection and --queries must be supplied. " \
                "If neither is supplied, the --triples file must contain texts (not PIDs)."
self.checks.append(check_training_input)
def add_ranking_input(self):
self.add_argument('--queries', dest='queries', default=None)
self.add_argument('--collection', dest='collection', default=None)
self.add_argument('--qrels', dest='qrels', default=None)
def add_reranking_input(self):
self.add_ranking_input()
self.add_argument('--topk', dest='topK', required=True)
self.add_argument('--shortcircuit', dest='shortcircuit', default=False, action='store_true')
def add_indexing_input(self):
self.add_argument('--collection', dest='collection', required=True)
self.add_argument('--index_root', dest='index_root', required=True)
self.add_argument('--index_name', dest='index_name', required=True)
def add_index_use_input(self):
self.add_argument('--index_root', dest='index_root', required=True)
self.add_argument('--index_name', dest='index_name', required=True)
self.add_argument('--partitions', dest='partitions', default=None, type=int)
def add_retrieval_input(self):
self.add_index_use_input()
self.add_argument('--nprobe', dest='nprobe', default=10, type=int)
self.add_argument('--retrieve_only', dest='retrieve_only', default=False, action='store_true')
def add_argument(self, *args, **kw_args):
return self.parser.add_argument(*args, **kw_args)
def check_arguments(self, args):
for check in self.checks:
check(args)
def parse(self):
args = self.parser.parse_args()
self.check_arguments(args)
args.input_arguments = copy.deepcopy(args)
args.nranks, args.distributed = distributed.init(args.rank)
args.nthreads = int(max(os.cpu_count(), faiss.omp_get_max_threads()) * 0.8)
args.nthreads = max(1, args.nthreads // args.nranks)
if args.nranks > 1:
print_message(f"#> Restricting number of threads for FAISS to {args.nthreads} per process",
condition=(args.rank == 0))
faiss.omp_set_num_threads(args.nthreads)
Run.init(args.rank, args.root, args.experiment, args.run)
Run._log_args(args)
Run.info(args.input_arguments.__dict__, '\n')
return args
| 5,179 | 44.043478 | 108 | py |
ColBERT | ColBERT-master/colbert/utils/runs.py | import os
import sys
import time
import __main__
import traceback
import mlflow
import colbert.utils.distributed as distributed
from contextlib import contextmanager
from colbert.utils.logging import Logger
from colbert.utils.utils import timestamp, create_directory, print_message
class _RunManager():
def __init__(self):
self.experiments_root = None
self.experiment = None
self.path = None
self.script = self._get_script_name()
self.name = self._generate_default_run_name()
self.original_name = self.name
self.exit_status = 'FINISHED'
self._logger = None
self.start_time = time.time()
def init(self, rank, root, experiment, name):
assert '/' not in experiment, experiment
assert '/' not in name, name
self.experiments_root = os.path.abspath(root)
self.experiment = experiment
self.name = name
self.path = os.path.join(self.experiments_root, self.experiment, self.script, self.name)
if rank < 1:
if os.path.exists(self.path):
print('\n\n')
print_message("It seems that ", self.path, " already exists.")
print_message("Do you want to overwrite it? \t yes/no \n")
# TODO: This should timeout and exit (i.e., fail) given no response for 60 seconds.
response = input()
if response.strip() != 'yes':
assert not os.path.exists(self.path), self.path
else:
create_directory(self.path)
distributed.barrier(rank)
self._logger = Logger(rank, self)
self._log_args = self._logger._log_args
self.warn = self._logger.warn
self.info = self._logger.info
self.info_all = self._logger.info_all
self.log_metric = self._logger.log_metric
self.log_new_artifact = self._logger.log_new_artifact
def _generate_default_run_name(self):
return timestamp()
def _get_script_name(self):
return os.path.basename(__main__.__file__) if '__file__' in dir(__main__) else 'none'
@contextmanager
def context(self, consider_failed_if_interrupted=True):
try:
yield
except KeyboardInterrupt as ex:
print('\n\nInterrupted\n\n')
self._logger._log_exception(ex.__class__, ex, ex.__traceback__)
self._logger._log_all_artifacts()
if consider_failed_if_interrupted:
self.exit_status = 'KILLED' # mlflow.entities.RunStatus.KILLED
sys.exit(128 + 2)
except Exception as ex:
self._logger._log_exception(ex.__class__, ex, ex.__traceback__)
self._logger._log_all_artifacts()
self.exit_status = 'FAILED' # mlflow.entities.RunStatus.FAILED
raise ex
finally:
total_seconds = str(time.time() - self.start_time) + '\n'
original_name = str(self.original_name)
name = str(self.name)
self.log_new_artifact(os.path.join(self._logger.logs_path, 'elapsed.txt'), total_seconds)
self.log_new_artifact(os.path.join(self._logger.logs_path, 'name.original.txt'), original_name)
self.log_new_artifact(os.path.join(self._logger.logs_path, 'name.txt'), name)
self._logger._log_all_artifacts()
mlflow.end_run(status=self.exit_status)
Run = _RunManager()
| 3,463 | 31.990476 | 107 | py |
ColBERT | ColBERT-master/colbert/utils/logging.py | import os
import sys
import ujson
import mlflow
import traceback
from torch.utils.tensorboard import SummaryWriter
from colbert.utils.utils import print_message, create_directory
class Logger():
def __init__(self, rank, run):
self.rank = rank
self.is_main = self.rank in [-1, 0]
self.run = run
self.logs_path = os.path.join(self.run.path, "logs/")
if self.is_main:
self._init_mlflow()
self.initialized_tensorboard = False
create_directory(self.logs_path)
def _init_mlflow(self):
mlflow.set_tracking_uri('file://' + os.path.join(self.run.experiments_root, "logs/mlruns/"))
mlflow.set_experiment('/'.join([self.run.experiment, self.run.script]))
mlflow.set_tag('experiment', self.run.experiment)
mlflow.set_tag('name', self.run.name)
mlflow.set_tag('path', self.run.path)
def _init_tensorboard(self):
root = os.path.join(self.run.experiments_root, "logs/tensorboard/")
logdir = '__'.join([self.run.experiment, self.run.script, self.run.name])
logdir = os.path.join(root, logdir)
self.writer = SummaryWriter(log_dir=logdir)
self.initialized_tensorboard = True
def _log_exception(self, etype, value, tb):
if not self.is_main:
return
output_path = os.path.join(self.logs_path, 'exception.txt')
trace = ''.join(traceback.format_exception(etype, value, tb)) + '\n'
print_message(trace, '\n\n')
self.log_new_artifact(output_path, trace)
def _log_all_artifacts(self):
if not self.is_main:
return
mlflow.log_artifacts(self.logs_path)
def _log_args(self, args):
if not self.is_main:
return
for key in vars(args):
value = getattr(args, key)
if type(value) in [int, float, str, bool]:
mlflow.log_param(key, value)
with open(os.path.join(self.logs_path, 'args.json'), 'w') as output_metadata:
ujson.dump(args.input_arguments.__dict__, output_metadata, indent=4)
output_metadata.write('\n')
with open(os.path.join(self.logs_path, 'args.txt'), 'w') as output_metadata:
output_metadata.write(' '.join(sys.argv) + '\n')
def log_metric(self, name, value, step, log_to_mlflow=True):
if not self.is_main:
return
if not self.initialized_tensorboard:
self._init_tensorboard()
if log_to_mlflow:
mlflow.log_metric(name, value, step=step)
self.writer.add_scalar(name, value, step)
def log_new_artifact(self, path, content):
with open(path, 'w') as f:
f.write(content)
mlflow.log_artifact(path)
def warn(self, *args):
msg = print_message('[WARNING]', '\t', *args)
with open(os.path.join(self.logs_path, 'warnings.txt'), 'a') as output_metadata:
output_metadata.write(msg + '\n\n\n')
def info_all(self, *args):
print_message('[' + str(self.rank) + ']', '\t', *args)
def info(self, *args):
if self.is_main:
print_message(*args)
| 3,185 | 30.86 | 100 | py |
ColBERT | ColBERT-master/colbert/utils/utils.py | import os
import tqdm
import torch
import datetime
import itertools
from multiprocessing import Pool
from collections import OrderedDict, defaultdict
def print_message(*s, condition=True):
s = ' '.join([str(x) for x in s])
msg = "[{}] {}".format(datetime.datetime.now().strftime("%b %d, %H:%M:%S"), s)
if condition:
print(msg, flush=True)
return msg
def timestamp():
format_str = "%Y-%m-%d_%H.%M.%S"
result = datetime.datetime.now().strftime(format_str)
return result
def file_tqdm(file):
print(f"#> Reading {file.name}")
with tqdm.tqdm(total=os.path.getsize(file.name) / 1024.0 / 1024.0, unit="MiB") as pbar:
for line in file:
yield line
pbar.update(len(line) / 1024.0 / 1024.0)
pbar.close()
def save_checkpoint(path, epoch_idx, mb_idx, model, optimizer, arguments=None):
print(f"#> Saving a checkpoint to {path} ..")
if hasattr(model, 'module'):
model = model.module # extract model from a distributed/data-parallel wrapper
checkpoint = {}
checkpoint['epoch'] = epoch_idx
checkpoint['batch'] = mb_idx
checkpoint['model_state_dict'] = model.state_dict()
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
checkpoint['arguments'] = arguments
torch.save(checkpoint, path)
def load_checkpoint(path, model, optimizer=None, do_print=True):
if do_print:
print_message("#> Loading checkpoint", path, "..")
if path.startswith("http:") or path.startswith("https:"):
checkpoint = torch.hub.load_state_dict_from_url(path, map_location='cpu')
else:
checkpoint = torch.load(path, map_location='cpu')
state_dict = checkpoint['model_state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k
if k[:7] == 'module.':
name = k[7:]
new_state_dict[name] = v
checkpoint['model_state_dict'] = new_state_dict
try:
model.load_state_dict(checkpoint['model_state_dict'])
except:
print_message("[WARNING] Loading checkpoint with strict=False")
model.load_state_dict(checkpoint['model_state_dict'], strict=False)
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if do_print:
print_message("#> checkpoint['epoch'] =", checkpoint['epoch'])
print_message("#> checkpoint['batch'] =", checkpoint['batch'])
return checkpoint
def create_directory(path):
if os.path.exists(path):
print('\n')
print_message("#> Note: Output directory", path, 'already exists\n\n')
else:
print('\n')
print_message("#> Creating directory", path, '\n\n')
os.makedirs(path)
# def batch(file, bsize):
# while True:
# L = [ujson.loads(file.readline()) for _ in range(bsize)]
# yield L
# return
def f7(seq):
"""
Source: https://stackoverflow.com/a/480227/1493011
"""
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
def batch(group, bsize, provide_offset=False):
offset = 0
while offset < len(group):
L = group[offset: offset + bsize]
yield ((offset, L) if provide_offset else L)
offset += len(L)
return
class dotdict(dict):
"""
dot.notation access to dictionary attributes
Credit: derek73 @ https://stackoverflow.com/questions/2352181
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
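# Illustrative usage (not in the original source):
#   args = dotdict({'bsize': 32}); args.lr = 3e-6; assert args.bsize == 32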
def flatten(L):
return [x for y in L for x in y]
def zipstar(L, lazy=False):
"""
A much faster A, B, C = zip(*[(a, b, c), (a, b, c), ...])
May return lists or tuples.
"""
if len(L) == 0:
return L
width = len(L[0])
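    # For narrow rows, a hand-rolled transpose is faster than zip(*L) on long
    # lists; the cutoff of 100 appears to be an empirical choice.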
if width < 100:
return [[elem[idx] for elem in L] for idx in range(width)]
L = zip(*L)
return L if lazy else list(L)
def zip_first(L1, L2):
length = len(L1) if type(L1) in [tuple, list] else None
L3 = list(zip(L1, L2))
assert length in [None, len(L3)], "zip_first() failure: length differs!"
return L3
def int_or_float(val):
if '.' in val:
return float(val)
return int(val)
def load_ranking(path, types=None, lazy=False):
print_message(f"#> Loading the ranked lists from {path} ..")
try:
lists = torch.load(path)
lists = zipstar([l.tolist() for l in tqdm.tqdm(lists)], lazy=lazy)
    except Exception:
if types is None:
types = itertools.cycle([int_or_float])
with open(path) as f:
lists = [[typ(x) for typ, x in zip_first(types, line.strip().split('\t'))]
for line in file_tqdm(f)]
return lists
def save_ranking(ranking, path):
lists = zipstar(ranking)
lists = [torch.tensor(l) for l in lists]
torch.save(lists, path)
return lists
def groupby_first_item(lst):
groups = defaultdict(list)
for first, *rest in lst:
rest = rest[0] if len(rest) == 1 else rest
groups[first].append(rest)
return groups
def process_grouped_by_first_item(lst):
"""
Requires items in list to already be grouped by first item.
"""
groups = defaultdict(list)
started = False
last_group = None
for first, *rest in lst:
rest = rest[0] if len(rest) == 1 else rest
        if started and first != last_group:
            yield (last_group, groups[last_group])
            # The precondition check belongs inside this branch: on a group
            # switch, the incoming key must not have been seen before.
            assert first not in groups, f"{first} seen earlier --- violates precondition."
        groups[first].append(rest)
        last_group = first
        started = True
    # Flush the final group, which the loop above never yields on its own.
    if started:
        yield (last_group, groups[last_group])
    return groups
def grouper(iterable, n, fillvalue=None):
"""
Collect data into fixed-length chunks or blocks
Example: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
Source: https://docs.python.org/3/library/itertools.html#itertools-recipes
"""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
# see https://stackoverflow.com/a/45187287
class NullContextManager(object):
def __init__(self, dummy_resource=None):
self.dummy_resource = dummy_resource
def __enter__(self):
return self.dummy_resource
def __exit__(self, *args):
pass
def load_batch_backgrounds(args, qids):
if args.qid2backgrounds is None:
return None
qbackgrounds = []
for qid in qids:
back = args.qid2backgrounds[qid]
if len(back) and type(back[0]) == int:
x = [args.collection[pid] for pid in back]
else:
x = [args.collectionX.get(pid, '') for pid in back]
x = ' [SEP] '.join(x)
qbackgrounds.append(x)
return qbackgrounds
| 6,747 | 23.808824 | 91 | py |
ColBERT | ColBERT-master/colbert/utils/distributed.py | import os
import random
import torch
import numpy as np
def init(rank):
    nranks = int(os.environ.get('WORLD_SIZE', 1))
    nranks = max(1, nranks)
is_distributed = nranks > 1
if rank == 0:
print('nranks =', nranks, '\t num_gpus =', torch.cuda.device_count())
if is_distributed:
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
return nranks, is_distributed
def barrier(rank):
if rank >= 0:
torch.distributed.barrier()
| 614 | 22.653846 | 82 | py |
ColBERT | ColBERT-master/colbert/utils/amp.py | import torch
from contextlib import contextmanager
from colbert.utils.utils import NullContextManager
from packaging import version
v = version.parse
PyTorch_over_1_6 = v(torch.__version__) >= v('1.6')
class MixedPrecisionManager():
def __init__(self, activated):
assert (not activated) or PyTorch_over_1_6, "Cannot use AMP for PyTorch version < 1.6"
self.activated = activated
if self.activated:
self.scaler = torch.cuda.amp.GradScaler()
def context(self):
return torch.cuda.amp.autocast() if self.activated else NullContextManager()
def backward(self, loss):
if self.activated:
self.scaler.scale(loss).backward()
else:
loss.backward()
def step(self, colbert, optimizer):
if self.activated:
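            # Unscale the gradients before clipping so the max-norm threshold
            # applies to true gradient magnitudes rather than scaled ones.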
self.scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(colbert.parameters(), 2.0)
self.scaler.step(optimizer)
self.scaler.update()
optimizer.zero_grad()
else:
torch.nn.utils.clip_grad_norm_(colbert.parameters(), 2.0)
optimizer.step()
optimizer.zero_grad()
| 1,178 | 28.475 | 94 | py |
ColBERT | ColBERT-master/colbert/utils/__init__.py | 0 | 0 | 0 | py |
|
ColBERT | ColBERT-master/colbert/ranking/index_part.py | import os
import torch
import ujson
from math import ceil
from itertools import accumulate
from colbert.utils.utils import print_message, dotdict, flatten
from colbert.indexing.loaders import get_parts, load_doclens
from colbert.indexing.index_manager import load_index_part
from colbert.ranking.index_ranker import IndexRanker
class IndexPart():
def __init__(self, directory, dim=128, part_range=None, verbose=True):
first_part, last_part = (0, None) if part_range is None else (part_range.start, part_range.stop)
# Load parts metadata
all_parts, all_parts_paths, _ = get_parts(directory)
self.parts = all_parts[first_part:last_part]
self.parts_paths = all_parts_paths[first_part:last_part]
# Load doclens metadata
all_doclens = load_doclens(directory, flatten=False)
self.doc_offset = sum([len(part_doclens) for part_doclens in all_doclens[:first_part]])
self.doc_endpos = sum([len(part_doclens) for part_doclens in all_doclens[:last_part]])
self.pids_range = range(self.doc_offset, self.doc_endpos)
self.parts_doclens = all_doclens[first_part:last_part]
self.doclens = flatten(self.parts_doclens)
self.num_embeddings = sum(self.doclens)
self.tensor = self._load_parts(dim, verbose)
self.ranker = IndexRanker(self.tensor, self.doclens)
def _load_parts(self, dim, verbose):
tensor = torch.zeros(self.num_embeddings + 512, dim, dtype=torch.float16)
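        # The extra 512 rows are padding so that strided views over the tensor
        # (see IndexRanker) can extend past the final document's start offset.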
if verbose:
print_message("tensor.size() = ", tensor.size())
offset = 0
for idx, filename in enumerate(self.parts_paths):
print_message("|> Loading", filename, "...", condition=verbose)
endpos = offset + sum(self.parts_doclens[idx])
part = load_index_part(filename, verbose=verbose)
tensor[offset:endpos] = part
offset = endpos
return tensor
def pid_in_range(self, pid):
return pid in self.pids_range
def rank(self, Q, pids):
"""
Rank a single batch of Q x pids (e.g., 1k--10k pairs).
"""
assert Q.size(0) in [1, len(pids)], (Q.size(0), len(pids))
assert all(pid in self.pids_range for pid in pids), self.pids_range
pids_ = [pid - self.doc_offset for pid in pids]
scores = self.ranker.rank(Q, pids_)
return scores
def batch_rank(self, all_query_embeddings, query_indexes, pids, sorted_pids):
"""
Rank a large, fairly dense set of query--passage pairs (e.g., 1M+ pairs).
Higher overhead, much faster for large batches.
"""
assert ((pids >= self.pids_range.start) & (pids < self.pids_range.stop)).sum() == pids.size(0)
pids_ = pids - self.doc_offset
scores = self.ranker.batch_rank(all_query_embeddings, query_indexes, pids_, sorted_pids)
return scores
| 2,912 | 34.096386 | 104 | py |
ColBERT | ColBERT-master/colbert/ranking/batch_retrieval.py | import os
import time
import faiss
import random
import torch
from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, batch
from colbert.ranking.faiss_index import FaissIndex
def batch_retrieve(args):
assert args.retrieve_only, "TODO: Combine batch (multi-query) retrieval with batch re-ranking"
faiss_index = FaissIndex(args.index_path, args.faiss_index_path, args.nprobe, args.part_range)
inference = ModelInference(args.colbert, amp=args.amp)
ranking_logger = RankingLogger(Run.path, qrels=None)
with ranking_logger.context('unordered.tsv', also_save_annotations=False) as rlogger:
queries = args.queries
qids_in_order = list(queries.keys())
for qoffset, qbatch in batch(qids_in_order, 100_000, provide_offset=True):
qbatch_text = [queries[qid] for qid in qbatch]
print_message(f"#> Embedding {len(qbatch_text)} queries in parallel...")
Q = inference.queryFromText(qbatch_text, bsize=512)
print_message("#> Starting batch retrieval...")
all_pids = faiss_index.retrieve(args.faiss_depth, Q, verbose=True)
# Log the PIDs with rank -1 for all
for query_idx, (qid, ranking) in enumerate(zip(qbatch, all_pids)):
query_idx = qoffset + query_idx
if query_idx % 1000 == 0:
print_message(f"#> Logging query #{query_idx} (qid {qid}) now...")
ranking = [(None, pid, None) for pid in ranking]
rlogger.log(qid, ranking, is_ranked=False)
print('\n\n')
print(ranking_logger.filename)
print("#> Done.")
print('\n\n')
| 1,819 | 34.686275 | 98 | py |
ColBERT | ColBERT-master/colbert/ranking/batch_reranking.py | import os
import time
import torch
import queue
import threading
from collections import defaultdict
from colbert.utils.runs import Run
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, flatten, zipstar
from colbert.indexing.loaders import get_parts
from colbert.ranking.index_part import IndexPart
MAX_DEPTH_LOGGED = 1000 # TODO: Use args.depth
def prepare_ranges(index_path, dim, step, part_range):
print_message("#> Launching a separate thread to load index parts asynchronously.")
parts, _, _ = get_parts(index_path)
positions = [(offset, offset + step) for offset in range(0, len(parts), step)]
if part_range is not None:
positions = positions[part_range.start: part_range.stop]
loaded_parts = queue.Queue(maxsize=2)
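    # maxsize=2 bounds how many prefetched index parts wait in memory while the
    # consumer ranks the current one.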
def _loader_thread(index_path, dim, positions):
for offset, endpos in positions:
index = IndexPart(index_path, dim=dim, part_range=range(offset, endpos), verbose=True)
loaded_parts.put(index, block=True)
thread = threading.Thread(target=_loader_thread, args=(index_path, dim, positions,))
thread.start()
return positions, loaded_parts, thread
def score_by_range(positions, loaded_parts, all_query_embeddings, all_query_rankings, all_pids):
print_message("#> Sorting by PID..")
all_query_indexes, all_pids = zipstar(all_pids)
sorting_pids = torch.tensor(all_pids).sort()
all_query_indexes, all_pids = torch.tensor(all_query_indexes)[sorting_pids.indices], sorting_pids.values
range_start, range_end = 0, 0
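    # all_pids is sorted, so the PIDs for each index part form a contiguous
    # slice; advance a moving window rather than re-filtering the full tensor.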
for offset, endpos in positions:
print_message(f"#> Fetching parts {offset}--{endpos} from queue..")
index = loaded_parts.get()
print_message(f"#> Filtering PIDs to the range {index.pids_range}..")
range_start = range_start + (all_pids[range_start:] < index.pids_range.start).sum()
range_end = range_end + (all_pids[range_end:] < index.pids_range.stop).sum()
pids = all_pids[range_start:range_end]
query_indexes = all_query_indexes[range_start:range_end]
print_message(f"#> Got {len(pids)} query--passage pairs in this range.")
if len(pids) == 0:
continue
print_message(f"#> Ranking in batches the pairs #{range_start} through #{range_end}...")
scores = index.batch_rank(all_query_embeddings, query_indexes, pids, sorted_pids=True)
for query_index, pid, score in zip(query_indexes.tolist(), pids.tolist(), scores):
all_query_rankings[0][query_index].append(pid)
all_query_rankings[1][query_index].append(score)
def batch_rerank(args):
positions, loaded_parts, thread = prepare_ranges(args.index_path, args.dim, args.step, args.part_range)
inference = ModelInference(args.colbert, amp=args.amp)
queries, topK_pids = args.queries, args.topK_pids
with torch.no_grad():
queries_in_order = list(queries.values())
print_message(f"#> Encoding all {len(queries_in_order)} queries in batches...")
all_query_embeddings = inference.queryFromText(queries_in_order, bsize=512, to_cpu=True)
all_query_embeddings = all_query_embeddings.to(dtype=torch.float16).permute(0, 2, 1).contiguous()
for qid in queries:
"""
Since topK_pids is a defaultdict, make sure each qid *has* actual PID information (even if empty).
"""
assert qid in topK_pids, qid
all_pids = flatten([[(query_index, pid) for pid in topK_pids[qid]] for query_index, qid in enumerate(queries)])
all_query_rankings = [defaultdict(list), defaultdict(list)]
print_message(f"#> Will process {len(all_pids)} query--document pairs in total.")
with torch.no_grad():
score_by_range(positions, loaded_parts, all_query_embeddings, all_query_rankings, all_pids)
ranking_logger = RankingLogger(Run.path, qrels=None, log_scores=args.log_scores)
with ranking_logger.context('ranking.tsv', also_save_annotations=False) as rlogger:
with torch.no_grad():
for query_index, qid in enumerate(queries):
if query_index % 1000 == 0:
print_message("#> Logging query #{} (qid {}) now...".format(query_index, qid))
pids = all_query_rankings[0][query_index]
scores = all_query_rankings[1][query_index]
K = min(MAX_DEPTH_LOGGED, len(scores))
if K == 0:
continue
scores_topk = torch.tensor(scores).topk(K, largest=True, sorted=True)
pids, scores = torch.tensor(pids)[scores_topk.indices].tolist(), scores_topk.values.tolist()
ranking = [(score, pid, None) for pid, score in zip(pids, scores)]
assert len(ranking) <= MAX_DEPTH_LOGGED, (len(ranking), MAX_DEPTH_LOGGED)
rlogger.log(qid, ranking, is_ranked=True, print_positions=[1, 2] if query_index % 100 == 0 else [])
print('\n\n')
print(ranking_logger.filename)
print_message('#> Done.\n')
thread.join()
| 5,139 | 37.939394 | 115 | py |
ColBERT | ColBERT-master/colbert/ranking/index_ranker.py | import os
import math
import torch
import ujson
import traceback
from itertools import accumulate
from colbert.parameters import DEVICE
from colbert.utils.utils import print_message, dotdict, flatten
BSIZE = 1 << 14
class IndexRanker():
def __init__(self, tensor, doclens):
self.tensor = tensor
self.doclens = doclens
self.maxsim_dtype = torch.float32
self.doclens_pfxsum = [0] + list(accumulate(self.doclens))
self.doclens = torch.tensor(self.doclens)
self.doclens_pfxsum = torch.tensor(self.doclens_pfxsum)
self.dim = self.tensor.size(-1)
self.strides = [torch_percentile(self.doclens, p) for p in [90]]
self.strides.append(self.doclens.max().item())
self.strides = sorted(list(set(self.strides)))
print_message(f"#> Using strides {self.strides}..")
self.views = self._create_views(self.tensor)
self.buffers = self._create_buffers(BSIZE, self.tensor.dtype, {'cpu', 'cuda:0'})
def _create_views(self, tensor):
views = []
for stride in self.strides:
outdim = tensor.size(0) - stride + 1
view = torch.as_strided(tensor, (outdim, stride, self.dim), (self.dim, self.dim, 1))
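            # Row i of this view aliases tensor[i : i + stride] without copying,
            # so a document's embeddings can be gathered from its start offset.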
views.append(view)
return views
def _create_buffers(self, max_bsize, dtype, devices):
buffers = {}
for device in devices:
buffers[device] = [torch.zeros(max_bsize, stride, self.dim, dtype=dtype,
device=device, pin_memory=(device == 'cpu'))
for stride in self.strides]
return buffers
def rank(self, Q, pids, views=None, shift=0):
assert len(pids) > 0
assert Q.size(0) in [1, len(pids)]
Q = Q.contiguous().to(DEVICE).to(dtype=self.maxsim_dtype)
views = self.views if views is None else views
VIEWS_DEVICE = views[0].device
D_buffers = self.buffers[str(VIEWS_DEVICE)]
raw_pids = pids if type(pids) is list else pids.tolist()
pids = torch.tensor(pids) if type(pids) is list else pids
doclens, offsets = self.doclens[pids], self.doclens_pfxsum[pids]
assignments = (doclens.unsqueeze(1) > torch.tensor(self.strides).unsqueeze(0) + 1e-6).sum(-1)
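        # Assign each passage to the smallest stride that can hold its length.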
one_to_n = torch.arange(len(raw_pids))
output_pids, output_scores, output_permutation = [], [], []
for group_idx, stride in enumerate(self.strides):
locator = (assignments == group_idx)
            if locator.sum() == 0:  # no passages assigned to this stride
continue
group_pids, group_doclens, group_offsets = pids[locator], doclens[locator], offsets[locator]
group_Q = Q if Q.size(0) == 1 else Q[locator]
group_offsets = group_offsets.to(VIEWS_DEVICE) - shift
group_offsets_uniq, group_offsets_expand = torch.unique_consecutive(group_offsets, return_inverse=True)
D_size = group_offsets_uniq.size(0)
D = torch.index_select(views[group_idx], 0, group_offsets_uniq, out=D_buffers[group_idx][:D_size])
D = D.to(DEVICE)
D = D[group_offsets_expand.to(DEVICE)].to(dtype=self.maxsim_dtype)
mask = torch.arange(stride, device=DEVICE) + 1
mask = mask.unsqueeze(0) <= group_doclens.to(DEVICE).unsqueeze(-1)
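            # Zero out scores at padded positions beyond each document's true
            # length before taking the per-query-token maximum.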
scores = (D @ group_Q) * mask.unsqueeze(-1)
scores = scores.max(1).values.sum(-1).cpu()
output_pids.append(group_pids)
output_scores.append(scores)
output_permutation.append(one_to_n[locator])
output_permutation = torch.cat(output_permutation).sort().indices
output_pids = torch.cat(output_pids)[output_permutation].tolist()
output_scores = torch.cat(output_scores)[output_permutation].tolist()
assert len(raw_pids) == len(output_pids)
assert len(raw_pids) == len(output_scores)
assert raw_pids == output_pids
return output_scores
def batch_rank(self, all_query_embeddings, all_query_indexes, all_pids, sorted_pids):
assert sorted_pids is True
######
scores = []
range_start, range_end = 0, 0
for pid_offset in range(0, len(self.doclens), 50_000):
pid_endpos = min(pid_offset + 50_000, len(self.doclens))
range_start = range_start + (all_pids[range_start:] < pid_offset).sum()
range_end = range_end + (all_pids[range_end:] < pid_endpos).sum()
pids = all_pids[range_start:range_end]
query_indexes = all_query_indexes[range_start:range_end]
print_message(f"###--> Got {len(pids)} query--passage pairs in this sub-range {(pid_offset, pid_endpos)}.")
if len(pids) == 0:
continue
print_message(f"###--> Ranking in batches the pairs #{range_start} through #{range_end} in this sub-range.")
tensor_offset = self.doclens_pfxsum[pid_offset].item()
tensor_endpos = self.doclens_pfxsum[pid_endpos].item() + 512
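            # The +512 keeps this slice's strided views in bounds for documents
            # near the end of the sub-range, mirroring the padding added at load.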
collection = self.tensor[tensor_offset:tensor_endpos].to(DEVICE)
views = self._create_views(collection)
print_message(f"#> Ranking in batches of {BSIZE} query--passage pairs...")
for batch_idx, offset in enumerate(range(0, len(pids), BSIZE)):
if batch_idx % 100 == 0:
print_message("#> Processing batch #{}..".format(batch_idx))
endpos = offset + BSIZE
batch_query_index, batch_pids = query_indexes[offset:endpos], pids[offset:endpos]
Q = all_query_embeddings[batch_query_index]
scores.extend(self.rank(Q, batch_pids, views, shift=tensor_offset))
return scores
def torch_percentile(tensor, p):
assert p in range(1, 100+1)
assert tensor.dim() == 1
return tensor.kthvalue(int(p * tensor.size(0) / 100.0)).values.item()
| 5,952 | 35.078788 | 120 | py |
ColBERT | ColBERT-master/colbert/ranking/retrieval.py | import os
import time
import faiss
import random
import torch
import itertools
from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, batch
from colbert.ranking.rankers import Ranker
def retrieve(args):
inference = ModelInference(args.colbert, amp=args.amp)
ranker = Ranker(args, inference, faiss_depth=args.faiss_depth)
ranking_logger = RankingLogger(Run.path, qrels=None)
milliseconds = 0
with ranking_logger.context('ranking.tsv', also_save_annotations=False) as rlogger:
queries = args.queries
qids_in_order = list(queries.keys())
for qoffset, qbatch in batch(qids_in_order, 100, provide_offset=True):
qbatch_text = [queries[qid] for qid in qbatch]
rankings = []
for query_idx, q in enumerate(qbatch_text):
torch.cuda.synchronize('cuda:0')
s = time.time()
Q = ranker.encode([q])
pids, scores = ranker.rank(Q)
torch.cuda.synchronize()
milliseconds += (time.time() - s) * 1000.0
if len(pids):
print(qoffset+query_idx, q, len(scores), len(pids), scores[0], pids[0],
milliseconds / (qoffset+query_idx+1), 'ms')
rankings.append(zip(pids, scores))
for query_idx, (qid, ranking) in enumerate(zip(qbatch, rankings)):
query_idx = qoffset + query_idx
if query_idx % 100 == 0:
print_message(f"#> Logging query #{query_idx} (qid {qid}) now...")
ranking = [(score, pid, None) for pid, score in itertools.islice(ranking, args.depth)]
rlogger.log(qid, ranking, is_ranked=True)
print('\n\n')
print(ranking_logger.filename)
print("#> Done.")
print('\n\n')
| 2,000 | 31.274194 | 102 | py |
ColBERT | ColBERT-master/colbert/ranking/reranking.py | import os
import time
import faiss
import random
import torch
from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, batch
from colbert.ranking.rankers import Ranker
def rerank(args):
inference = ModelInference(args.colbert, amp=args.amp)
ranker = Ranker(args, inference, faiss_depth=None)
ranking_logger = RankingLogger(Run.path, qrels=None)
milliseconds = 0
with ranking_logger.context('ranking.tsv', also_save_annotations=False) as rlogger:
queries = args.queries
qids_in_order = list(queries.keys())
for qoffset, qbatch in batch(qids_in_order, 100, provide_offset=True):
qbatch_text = [queries[qid] for qid in qbatch]
qbatch_pids = [args.topK_pids[qid] for qid in qbatch]
rankings = []
for query_idx, (q, pids) in enumerate(zip(qbatch_text, qbatch_pids)):
torch.cuda.synchronize('cuda:0')
s = time.time()
Q = ranker.encode([q])
pids, scores = ranker.rank(Q, pids=pids)
torch.cuda.synchronize()
milliseconds += (time.time() - s) * 1000.0
if len(pids):
print(qoffset+query_idx, q, len(scores), len(pids), scores[0], pids[0],
milliseconds / (qoffset+query_idx+1), 'ms')
rankings.append(zip(pids, scores))
for query_idx, (qid, ranking) in enumerate(zip(qbatch, rankings)):
query_idx = qoffset + query_idx
if query_idx % 100 == 0:
print_message(f"#> Logging query #{query_idx} (qid {qid}) now...")
ranking = [(score, pid, None) for pid, score in ranking]
rlogger.log(qid, ranking, is_ranked=True)
print('\n\n')
print(ranking_logger.filename)
print("#> Done.")
print('\n\n')
| 2,042 | 31.951613 | 91 | py |
ColBERT | ColBERT-master/colbert/ranking/faiss_index.py | import os
import time
import faiss
import random
import torch
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.utils.utils import print_message, flatten, batch
from colbert.indexing.loaders import load_doclens
class FaissIndex():
def __init__(self, index_path, faiss_index_path, nprobe, part_range=None):
print_message("#> Loading the FAISS index from", faiss_index_path, "..")
faiss_part_range = os.path.basename(faiss_index_path).split('.')[-2].split('-')
if len(faiss_part_range) == 2:
faiss_part_range = range(*map(int, faiss_part_range))
assert part_range[0] in faiss_part_range, (part_range, faiss_part_range)
assert part_range[-1] in faiss_part_range, (part_range, faiss_part_range)
else:
faiss_part_range = None
self.part_range = part_range
self.faiss_part_range = faiss_part_range
self.faiss_index = faiss.read_index(faiss_index_path)
self.faiss_index.nprobe = nprobe
print_message("#> Building the emb2pid mapping..")
all_doclens = load_doclens(index_path, flatten=False)
pid_offset = 0
if faiss_part_range is not None:
print(f"#> Restricting all_doclens to the range {faiss_part_range}.")
pid_offset = len(flatten(all_doclens[:faiss_part_range.start]))
all_doclens = all_doclens[faiss_part_range.start:faiss_part_range.stop]
self.relative_range = None
if self.part_range is not None:
start = self.faiss_part_range.start if self.faiss_part_range is not None else 0
a = len(flatten(all_doclens[:self.part_range.start - start]))
b = len(flatten(all_doclens[:self.part_range.stop - start]))
self.relative_range = range(a, b)
print(f"self.relative_range = {self.relative_range}")
all_doclens = flatten(all_doclens)
total_num_embeddings = sum(all_doclens)
self.emb2pid = torch.zeros(total_num_embeddings, dtype=torch.int)
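        # emb2pid[i] = PID of the passage that embedding i belongs to, used to
        # map FAISS's embedding-level hits back to passage IDs.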
offset_doclens = 0
for pid, dlength in enumerate(all_doclens):
self.emb2pid[offset_doclens: offset_doclens + dlength] = pid_offset + pid
offset_doclens += dlength
print_message("len(self.emb2pid) =", len(self.emb2pid))
self.parallel_pool = Pool(16)
def retrieve(self, faiss_depth, Q, verbose=False):
embedding_ids = self.queries_to_embedding_ids(faiss_depth, Q, verbose=verbose)
pids = self.embedding_ids_to_pids(embedding_ids, verbose=verbose)
if self.relative_range is not None:
pids = [[pid for pid in pids_ if pid in self.relative_range] for pids_ in pids]
return pids
def queries_to_embedding_ids(self, faiss_depth, Q, verbose=True):
# Flatten into a matrix for the faiss search.
num_queries, embeddings_per_query, dim = Q.size()
Q_faiss = Q.view(num_queries * embeddings_per_query, dim).cpu().contiguous()
# Search in large batches with faiss.
print_message("#> Search in batches with faiss. \t\t",
f"Q.size() = {Q.size()}, Q_faiss.size() = {Q_faiss.size()}",
condition=verbose)
embeddings_ids = []
faiss_bsize = embeddings_per_query * 5000
for offset in range(0, Q_faiss.size(0), faiss_bsize):
endpos = min(offset + faiss_bsize, Q_faiss.size(0))
print_message("#> Searching from {} to {}...".format(offset, endpos), condition=verbose)
some_Q_faiss = Q_faiss[offset:endpos].float().numpy()
_, some_embedding_ids = self.faiss_index.search(some_Q_faiss, faiss_depth)
embeddings_ids.append(torch.from_numpy(some_embedding_ids))
embedding_ids = torch.cat(embeddings_ids)
# Reshape to (number of queries, non-unique embedding IDs per query)
embedding_ids = embedding_ids.view(num_queries, embeddings_per_query * embedding_ids.size(1))
return embedding_ids
def embedding_ids_to_pids(self, embedding_ids, verbose=True):
# Find unique PIDs per query.
print_message("#> Lookup the PIDs..", condition=verbose)
all_pids = self.emb2pid[embedding_ids]
print_message(f"#> Converting to a list [shape = {all_pids.size()}]..", condition=verbose)
all_pids = all_pids.tolist()
print_message("#> Removing duplicates (in parallel if large enough)..", condition=verbose)
if len(all_pids) > 5000:
all_pids = list(self.parallel_pool.map(uniq, all_pids))
else:
all_pids = list(map(uniq, all_pids))
print_message("#> Done with embedding_ids_to_pids().", condition=verbose)
return all_pids
def uniq(l):
return list(set(l))
| 4,820 | 38.195122 | 101 | py |
ColBERT | ColBERT-master/colbert/ranking/__init__.py | 0 | 0 | 0 | py |
|
ColBERT | ColBERT-master/colbert/ranking/rankers.py | import torch
from functools import partial
from colbert.ranking.index_part import IndexPart
from colbert.ranking.faiss_index import FaissIndex
from colbert.utils.utils import flatten, zipstar
class Ranker():
def __init__(self, args, inference, faiss_depth=1024):
self.inference = inference
self.faiss_depth = faiss_depth
if faiss_depth is not None:
self.faiss_index = FaissIndex(args.index_path, args.faiss_index_path, args.nprobe, part_range=args.part_range)
self.retrieve = partial(self.faiss_index.retrieve, self.faiss_depth)
self.index = IndexPart(args.index_path, dim=inference.colbert.dim, part_range=args.part_range, verbose=True)
def encode(self, queries):
assert type(queries) in [list, tuple], type(queries)
Q = self.inference.queryFromText(queries, bsize=512 if len(queries) > 512 else None)
return Q
def rank(self, Q, pids=None):
pids = self.retrieve(Q, verbose=False)[0] if pids is None else pids
assert type(pids) in [list, tuple], type(pids)
assert Q.size(0) == 1, (len(pids), Q.size())
assert all(type(pid) is int for pid in pids)
scores = []
if len(pids) > 0:
Q = Q.permute(0, 2, 1)
scores = self.index.rank(Q, pids)
scores_sorter = torch.tensor(scores).sort(descending=True)
pids, scores = torch.tensor(pids)[scores_sorter.indices].tolist(), scores_sorter.values.tolist()
return pids, scores
| 1,520 | 33.568182 | 122 | py |
ColBERT | ColBERT-master/colbert/modeling/inference.py | import torch
from colbert.modeling.colbert import ColBERT
from colbert.modeling.tokenization import QueryTokenizer, DocTokenizer
from colbert.utils.amp import MixedPrecisionManager
from colbert.parameters import DEVICE
class ModelInference():
def __init__(self, colbert: ColBERT, amp=False):
assert colbert.training is False
self.colbert = colbert
self.query_tokenizer = QueryTokenizer(colbert.query_maxlen)
self.doc_tokenizer = DocTokenizer(colbert.doc_maxlen)
self.amp_manager = MixedPrecisionManager(amp)
def query(self, *args, to_cpu=False, **kw_args):
with torch.no_grad():
with self.amp_manager.context():
Q = self.colbert.query(*args, **kw_args)
return Q.cpu() if to_cpu else Q
def doc(self, *args, to_cpu=False, **kw_args):
with torch.no_grad():
with self.amp_manager.context():
D = self.colbert.doc(*args, **kw_args)
return D.cpu() if to_cpu else D
def queryFromText(self, queries, bsize=None, to_cpu=False):
if bsize:
batches = self.query_tokenizer.tensorize(queries, bsize=bsize)
batches = [self.query(input_ids, attention_mask, to_cpu=to_cpu) for input_ids, attention_mask in batches]
return torch.cat(batches)
input_ids, attention_mask = self.query_tokenizer.tensorize(queries)
return self.query(input_ids, attention_mask)
def docFromText(self, docs, bsize=None, keep_dims=True, to_cpu=False):
if bsize:
batches, reverse_indices = self.doc_tokenizer.tensorize(docs, bsize=bsize)
batches = [self.doc(input_ids, attention_mask, keep_dims=keep_dims, to_cpu=to_cpu)
for input_ids, attention_mask in batches]
if keep_dims:
D = _stack_3D_tensors(batches)
return D[reverse_indices]
D = [d for batch in batches for d in batch]
return [D[idx] for idx in reverse_indices.tolist()]
input_ids, attention_mask = self.doc_tokenizer.tensorize(docs)
return self.doc(input_ids, attention_mask, keep_dims=keep_dims)
def score(self, Q, D, mask=None, lengths=None, explain=False):
if lengths is not None:
assert mask is None, "don't supply both mask and lengths"
mask = torch.arange(D.size(1), device=DEVICE) + 1
mask = mask.unsqueeze(0) <= lengths.to(DEVICE).unsqueeze(-1)
scores = (D @ Q)
scores = scores if mask is None else scores * mask.unsqueeze(-1)
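        # MaxSim: take each query token's best match over document tokens
        # (max over dim 1), then sum those maxima across the query.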
scores = scores.max(1)
if explain:
assert False, "TODO"
return scores.values.sum(-1).cpu()
def _stack_3D_tensors(groups):
bsize = sum([x.size(0) for x in groups])
maxlen = max([x.size(1) for x in groups])
hdim = groups[0].size(2)
output = torch.zeros(bsize, maxlen, hdim, device=groups[0].device, dtype=groups[0].dtype)
offset = 0
for x in groups:
endpos = offset + x.size(0)
output[offset:endpos, :x.size(1)] = x
offset = endpos
return output
| 3,132 | 34.602273 | 117 | py |
ColBERT | ColBERT-master/colbert/modeling/colbert.py | import string
import torch
import torch.nn as nn
from transformers import BertPreTrainedModel, BertModel, BertTokenizerFast
from colbert.parameters import DEVICE
class ColBERT(BertPreTrainedModel):
def __init__(self, config, query_maxlen, doc_maxlen, mask_punctuation, dim=128, similarity_metric='cosine'):
super(ColBERT, self).__init__(config)
self.query_maxlen = query_maxlen
self.doc_maxlen = doc_maxlen
self.similarity_metric = similarity_metric
self.dim = dim
self.mask_punctuation = mask_punctuation
self.skiplist = {}
if self.mask_punctuation:
self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
self.skiplist = {w: True
for symbol in string.punctuation
for w in [symbol, self.tokenizer.encode(symbol, add_special_tokens=False)[0]]}
self.bert = BertModel(config)
self.linear = nn.Linear(config.hidden_size, dim, bias=False)
self.init_weights()
def forward(self, Q, D):
return self.score(self.query(*Q), self.doc(*D))
def query(self, input_ids, attention_mask):
input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)
Q = self.bert(input_ids, attention_mask=attention_mask)[0]
Q = self.linear(Q)
return torch.nn.functional.normalize(Q, p=2, dim=2)
def doc(self, input_ids, attention_mask, keep_dims=True):
input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)
D = self.bert(input_ids, attention_mask=attention_mask)[0]
D = self.linear(D)
mask = torch.tensor(self.mask(input_ids), device=DEVICE).unsqueeze(2).float()
D = D * mask
D = torch.nn.functional.normalize(D, p=2, dim=2)
if not keep_dims:
D, mask = D.cpu().to(dtype=torch.float16), mask.cpu().bool().squeeze(-1)
D = [d[mask[idx]] for idx, d in enumerate(D)]
return D
def score(self, Q, D):
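        # Late interaction (MaxSim): sum, over query tokens, of the maximum
        # similarity between that query token and any document token.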
if self.similarity_metric == 'cosine':
return (Q @ D.permute(0, 2, 1)).max(2).values.sum(1)
assert self.similarity_metric == 'l2'
return (-1.0 * ((Q.unsqueeze(2) - D.unsqueeze(1))**2).sum(-1)).max(-1).values.sum(-1)
def mask(self, input_ids):
mask = [[(x not in self.skiplist) and (x != 0) for x in d] for d in input_ids.cpu().tolist()]
return mask
| 2,458 | 34.637681 | 112 | py |
ColBERT | ColBERT-master/colbert/modeling/__init__.py | 0 | 0 | 0 | py |
|
ColBERT | ColBERT-master/colbert/modeling/tokenization/doc_tokenization.py | import torch
from transformers import BertTokenizerFast
from colbert.modeling.tokenization.utils import _split_into_batches, _sort_by_length
class DocTokenizer():
def __init__(self, doc_maxlen):
self.tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
self.doc_maxlen = doc_maxlen
self.D_marker_token, self.D_marker_token_id = '[D]', self.tok.convert_tokens_to_ids('[unused1]')
self.cls_token, self.cls_token_id = self.tok.cls_token, self.tok.cls_token_id
self.sep_token, self.sep_token_id = self.tok.sep_token, self.tok.sep_token_id
assert self.D_marker_token_id == 2
def tokenize(self, batch_text, add_special_tokens=False):
assert type(batch_text) in [list, tuple], (type(batch_text))
tokens = [self.tok.tokenize(x, add_special_tokens=False) for x in batch_text]
if not add_special_tokens:
return tokens
prefix, suffix = [self.cls_token, self.D_marker_token], [self.sep_token]
tokens = [prefix + lst + suffix for lst in tokens]
return tokens
def encode(self, batch_text, add_special_tokens=False):
assert type(batch_text) in [list, tuple], (type(batch_text))
ids = self.tok(batch_text, add_special_tokens=False)['input_ids']
if not add_special_tokens:
return ids
prefix, suffix = [self.cls_token_id, self.D_marker_token_id], [self.sep_token_id]
ids = [prefix + lst + suffix for lst in ids]
return ids
def tensorize(self, batch_text, bsize=None):
assert type(batch_text) in [list, tuple], (type(batch_text))
        # add a placeholder for the [D] marker
batch_text = ['. ' + x for x in batch_text]
obj = self.tok(batch_text, padding='longest', truncation='longest_first',
return_tensors='pt', max_length=self.doc_maxlen)
ids, mask = obj['input_ids'], obj['attention_mask']
# postprocess for the [D] marker
ids[:, 1] = self.D_marker_token_id
if bsize:
ids, mask, reverse_indices = _sort_by_length(ids, mask, bsize)
batches = _split_into_batches(ids, mask, bsize)
return batches, reverse_indices
return ids, mask
| 2,248 | 34.140625 | 104 | py |
ColBERT | ColBERT-master/colbert/modeling/tokenization/query_tokenization.py | import torch
from transformers import BertTokenizerFast
from colbert.modeling.tokenization.utils import _split_into_batches
class QueryTokenizer():
def __init__(self, query_maxlen):
self.tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
self.query_maxlen = query_maxlen
self.Q_marker_token, self.Q_marker_token_id = '[Q]', self.tok.convert_tokens_to_ids('[unused0]')
self.cls_token, self.cls_token_id = self.tok.cls_token, self.tok.cls_token_id
self.sep_token, self.sep_token_id = self.tok.sep_token, self.tok.sep_token_id
self.mask_token, self.mask_token_id = self.tok.mask_token, self.tok.mask_token_id
assert self.Q_marker_token_id == 1 and self.mask_token_id == 103
def tokenize(self, batch_text, add_special_tokens=False):
assert type(batch_text) in [list, tuple], (type(batch_text))
tokens = [self.tok.tokenize(x, add_special_tokens=False) for x in batch_text]
if not add_special_tokens:
return tokens
prefix, suffix = [self.cls_token, self.Q_marker_token], [self.sep_token]
tokens = [prefix + lst + suffix + [self.mask_token] * (self.query_maxlen - (len(lst)+3)) for lst in tokens]
return tokens
def encode(self, batch_text, add_special_tokens=False):
assert type(batch_text) in [list, tuple], (type(batch_text))
ids = self.tok(batch_text, add_special_tokens=False)['input_ids']
if not add_special_tokens:
return ids
prefix, suffix = [self.cls_token_id, self.Q_marker_token_id], [self.sep_token_id]
ids = [prefix + lst + suffix + [self.mask_token_id] * (self.query_maxlen - (len(lst)+3)) for lst in ids]
return ids
def tensorize(self, batch_text, bsize=None):
assert type(batch_text) in [list, tuple], (type(batch_text))
        # add a placeholder for the [Q] marker
batch_text = ['. ' + x for x in batch_text]
obj = self.tok(batch_text, padding='max_length', truncation=True,
return_tensors='pt', max_length=self.query_maxlen)
ids, mask = obj['input_ids'], obj['attention_mask']
# postprocess for the [Q] marker and the [MASK] augmentation
ids[:, 1] = self.Q_marker_token_id
ids[ids == 0] = self.mask_token_id
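        # Query augmentation: padding positions become [MASK] tokens, letting
        # the model spread query semantics over the fixed query length.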
if bsize:
batches = _split_into_batches(ids, mask, bsize)
return batches
return ids, mask
| 2,449 | 36.692308 | 115 | py |
ColBERT | ColBERT-master/colbert/modeling/tokenization/utils.py | import torch
def tensorize_triples(query_tokenizer, doc_tokenizer, queries, positives, negatives, bsize):
assert len(queries) == len(positives) == len(negatives)
assert bsize is None or len(queries) % bsize == 0
N = len(queries)
Q_ids, Q_mask = query_tokenizer.tensorize(queries)
D_ids, D_mask = doc_tokenizer.tensorize(positives + negatives)
D_ids, D_mask = D_ids.view(2, N, -1), D_mask.view(2, N, -1)
# Compute max among {length of i^th positive, length of i^th negative} for i \in N
maxlens = D_mask.sum(-1).max(0).values
    # Sort by maxlens, presumably to group passages of similar length in a batch
indices = maxlens.sort().indices
Q_ids, Q_mask = Q_ids[indices], Q_mask[indices]
D_ids, D_mask = D_ids[:, indices], D_mask[:, indices]
(positive_ids, negative_ids), (positive_mask, negative_mask) = D_ids, D_mask
query_batches = _split_into_batches(Q_ids, Q_mask, bsize)
positive_batches = _split_into_batches(positive_ids, positive_mask, bsize)
negative_batches = _split_into_batches(negative_ids, negative_mask, bsize)
batches = []
for (q_ids, q_mask), (p_ids, p_mask), (n_ids, n_mask) in zip(query_batches, positive_batches, negative_batches):
Q = (torch.cat((q_ids, q_ids)), torch.cat((q_mask, q_mask)))
D = (torch.cat((p_ids, n_ids)), torch.cat((p_mask, n_mask)))
batches.append((Q, D))
return batches
def _sort_by_length(ids, mask, bsize):
if ids.size(0) <= bsize:
return ids, mask, torch.arange(ids.size(0))
indices = mask.sum(-1).sort().indices
reverse_indices = indices.sort().indices
return ids[indices], mask[indices], reverse_indices
def _split_into_batches(ids, mask, bsize):
batches = []
for offset in range(0, ids.size(0), bsize):
batches.append((ids[offset:offset+bsize], mask[offset:offset+bsize]))
return batches
| 1,833 | 34.269231 | 116 | py |
ColBERT | ColBERT-master/colbert/modeling/tokenization/__init__.py | from colbert.modeling.tokenization.query_tokenization import *
from colbert.modeling.tokenization.doc_tokenization import *
from colbert.modeling.tokenization.utils import tensorize_triples
| 190 | 46.75 | 65 | py |
cili | cili-master/make-tsv.py | #!/usr/bin/env python3
"""
Script to produce a TSV file for a release of CILI.
The mappings to the Princeton WordNet generally don't need to be
released regularly as they are unlikely to change and are already
included in WN-LMF releases of the PWN, so this script reduces the
ili.ttl file to a two-column tab-separated-value file containing only
the ILI inventory and their definitions. This assumes that every ILI
has a definition, which is true by design. The resulting .tsv file is
less than half the size of the .ttl file when uncompressed, but
roughly the same size when compressed. TSV is generally much faster to
parse, however, and doesn't require an RDF library, so it is more
appealing for downstream applications.
Requirements:
- Python 3.6+
- rdflib
Usage:
python3 make-tsv.py > cili.tsv
"""
import sys
from rdflib import Graph
from rdflib.namespace import SKOS
g = Graph()
g.parse("ili.ttl", format='ttl')
# pair each ILI (ignoring the URL part) with its definition
data = [(subj.rpartition('/')[2], obj)
for subj, obj
in g.subject_objects(predicate=SKOS.definition)]
# sort by ILI number
data.sort(key=lambda pair: int(pair[0].lstrip('i')))
print('ILI\tDefinition')
for ili, definition in data:
print(f'{ili}\t{definition}')
| 1,284 | 26.934783 | 70 | py |
cili | cili-master/make-html.py | #!/usr/bin/env python3
"""
Requirements:
- Python 3.9+ (str.removeprefix is used below)
- rdflib
Usage:
python3 make-html.py OUTDIR
"""
from typing import Dict
import sys
from pathlib import Path
from rdflib import Graph
from rdflib.namespace import RDF, DC, SKOS, Namespace
if len(sys.argv) != 2:
sys.exit('usage: python3 make-html.py OUTDIR')
OUTDIR = Path(sys.argv[1])
if OUTDIR.exists():
sys.exit(f'{OUTDIR!s} already exists; remove or rename it, then try again')
OUTDIR.mkdir()
css = '''\
:root {
--text-color: #111;
--background-color: white;
}
body {
width: 100%;
color: var(--text-color);
margin: 0;
background-color: var(--background-color);
font-family: "Roboto", "Fira Sans", sans-serif;
}
header {
width: 100%;
margin: 0;
padding: 10px;
background-color: black;
color: #eee;
}
header h1 { margin-top: 0; text-align: center; }
article {
width: 800px;
margin: 10px auto;
padding: 10px;
border-radius: 10px;
}
article.ili { background-color: rgba(128,128,128,.1); }
article footer {
margin: 10px;
text-align: right;
}
blockquote {
margin: 10px 0;
padding: 10px;
border-left: 4px solid #888;
background-color: rgba(128,128,128,.1)
}
dl {
display: grid;
grid-template-columns: max-content auto;
}
dt { grid-column-start: 1; }
dd { grid-column-start: 2; }
.ili-type, dd { font-weight: bold; }
a { color: rgb(90, 170, 255); text-decoration: none; }
a:hover { text-decoration: underline; }
a:active { color: rgb(120, 200, 255); }
@media screen and (max-width: 799px) {
article {
width: 400px;
}
}
@media (prefers-color-scheme: dark) {
body {
--text-color: #eee;
--background-color: black;
}
}
'''
base = '''\
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<link href="_static/style.css" rel="stylesheet">
<title>{title}</title>
</head>
<body>
<header>
<h1>Global WordNet Association: Interlingual Index</h1>
</header>
{content}
</body>
</html>
'''
article = '''\
<article class="ili" itemscope itemtype="{type!s}" itemid="{subject!s}">
<h1>{ili}</h1>
<div class="ili-type">{short_type!s}</div>
<blockquote itemprop="http://www.w3.org/2004/02/skos/core#definition">
{definition!s}
</blockquote>
<dl>
<dt>Status</dt>
<dd itemprop="status">{status!s}</dd>
<dt>Source</dt>
<dd><a href="{source_info[url]}">{source_info[name]}</a>
–
<a itemprop="http://purl.org/dc/elements/1.1/source" href="{source!s}">{source_info[local]}</a>
</dd>
</dl>
<footer>Part of <a href="https://github.com/globalwordnet/cili/">globalwordnet/cili</a></footer>
</article>
'''
ILI = Namespace('http://globalwordnet.org/ili/')
sources = {
'http://wordnet-rdf.princeton.edu/wn30/': ('Princeton WordNet 3.0',
'https://wordnet.princeton.edu/'),
}
def source_info(url: str) -> Dict[str, str]:
for src in sources:
if url.startswith(src):
local = url.removeprefix(src).lstrip('/#')
name, project_url = sources[src]
return {'name': name, 'url': project_url, 'local': local}
raise LookupError(f'source info not found for {url!s}')
def short_name(s: str) -> str:
return s.rpartition('/')[2]
g = Graph()
g.parse("ili.ttl", format='ttl')
for subj in g.subjects():
type = g.value(subject=subj, predicate=RDF.type)
if type not in (ILI.Concept, ILI.Instance):
continue
ili = short_name(subj)
source = g.value(subject=subj, predicate=DC.source)
data = {
'ili': ili,
'subject': subj,
'type': type,
'short_type': short_name(type),
'definition': g.value(subject=subj, predicate=SKOS.definition),
'status': g.value(subject=subj, predicate=ILI.status, default='active'),
'source': source,
'source_info': source_info(source),
}
content = base.format(title=f'ILI: {ili}', content=article.format(**data))
(OUTDIR / f'{ili}.html').write_text(content)
(OUTDIR / '.nojekyll').touch() # for GitHub pages
(OUTDIR / '_static').mkdir()
(OUTDIR / '_static' / 'style.css').write_text(css)
(OUTDIR / 'index.html').write_text(base.format(
title='Interlingual Index',
content='''\
<article>
<a href="https://github.com/globalwordnet/cili">https://github.com/globalwordnet/cili</a>
</article>
'''))
| 4,438 | 22.363158 | 105 | py |
gate-teamware | gate-teamware-master/version.py | import json
import yaml
import sys
PACKAGE_JSON_FILE_PATH = "package.json"
DOCS_PACKAGE_JSON_FILE_PATH = "docs/package.json"
CITATION_FILE_PATH = "CITATION.cff"
MASTER_VERSION_FILE = "VERSION"
def check():
"""
Intended for use in CI pipelines, checks versions in files and exits with non-zero exit code if they don't match.
"""
js_version = get_package_json_version(PACKAGE_JSON_FILE_PATH)
print(f"package.json version is {js_version}")
docs_js_version = get_package_json_version(DOCS_PACKAGE_JSON_FILE_PATH)
print(f"docs package.json version is {docs_js_version}")
with open(CITATION_FILE_PATH, "r") as f:
citation_file = yaml.safe_load(f)
citation_version = citation_file['version']
print(f"CITATION.cff version is {citation_version}")
master_version = get_master_version()
print(f"VERSION file version is {master_version}")
if js_version != master_version or docs_js_version != master_version or citation_version != master_version:
print("One or more versions does not match")
sys.exit(1)
else:
print("All versions match!")
def get_package_json_version(file_path: str) -> str:
with open(file_path, "r") as f:
package_json = json.load(f)
js_version = package_json['version']
return js_version
def get_master_version():
with open(MASTER_VERSION_FILE, "r") as f:
master_version = f.readline().strip()
return master_version
def update():
"""
Updates all versions to match the master version file.
"""
master_version = get_master_version()
update_package_json_version(PACKAGE_JSON_FILE_PATH, master_version)
update_package_json_version(DOCS_PACKAGE_JSON_FILE_PATH, master_version)
with open(CITATION_FILE_PATH, "r") as f:
citation_file = yaml.safe_load(f)
print(f"Writing master version {master_version} to {CITATION_FILE_PATH}")
with open(CITATION_FILE_PATH, "w") as f:
citation_file['version'] = master_version
yaml.dump(citation_file, f)
check()
def update_package_json_version(file_path:str, version_no:str):
with open(file_path, "r") as f:
package_json = json.load(f)
print(f"Writing master version {version_no} to {file_path}")
with open(file_path, "w") as f:
package_json['version'] = version_no
json.dump(package_json, f, indent=2)
if __name__ == "__main__":
if sys.argv[1] == 'check':
print("Checking versions...")
check()
elif sys.argv[1] == 'update':
print("Updating versions...")
update()
else:
print(f"Unknown function {sys.argv[1]}, available functions are 'check' and 'update'.")
| 2,693 | 30.325581 | 117 | py |
gate-teamware | gate-teamware-master/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teamware.settings.base')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 669 | 28.130435 | 77 | py |
gate-teamware | gate-teamware-master/backend/views.py | import tempfile
import json
import math
import csv
from zipfile import ZipFile
from django.conf import settings
from django.http import StreamingHttpResponse, HttpResponse
from django.shortcuts import render
from django.views import View
from backend.models import Project, Document, DocumentType
class MainView(View):
"""
The main view of the app (index page)
"""
template_page = "index.html"
def get(self, request, *args, **kwargs):
"""
:param request:
:return:
"""
context = {
"settings": settings
}
return render(request, self.template_page, context=context)
class DownloadAnnotationsView(View):
def get(self, request, project_id, doc_type, export_type, json_format, entries_per_file, anonymize="anonymize"):
        anonymize = anonymize != "deanonymize"
if request.user.is_manager or request.user.is_staff or request.user.is_superuser:
response = StreamingHttpResponse(self.generate_download(project_id, doc_type, export_type, json_format, anonymize, documents_per_file=entries_per_file))
export_format_extension = ""
if export_type == "json" or export_type == "jsonl":
export_format_extension += export_type
if json_format == "raw" or json_format == "gate":
export_format_extension += "-"+json_format
elif export_type == "csv":
export_format_extension = export_type
response['Content-Type'] = 'application/zip'
response['Content-Disposition'] = f'attachment;filename="project{project_id:04d}-{export_format_extension}.zip"'
return response
return HttpResponse("No permission to access this endpoint", status=401)
def generate_download(self, project_id, doc_type="all", export_type="json", json_format="raw", anonymize=True, chunk_size=512, documents_per_file=500):
project = Project.objects.get(pk=project_id)
with tempfile.TemporaryFile() as z:
with ZipFile(z, "w") as zip:
all_docs = project.documents.all()
if doc_type == "training":
all_docs = project.documents.filter(doc_type=DocumentType.TRAINING)
elif doc_type == "test":
all_docs = project.documents.filter(doc_type=DocumentType.TEST)
elif doc_type == "annotation":
all_docs = project.documents.filter(doc_type=DocumentType.ANNOTATION)
num_docs = all_docs.count()
num_slices = math.ceil(num_docs/documents_per_file)
for slice_index in range(num_slices):
start_index = slice_index*documents_per_file
end_index = ((slice_index+1)*documents_per_file)
if end_index >= num_docs:
end_index = num_docs
slice_docs = all_docs[start_index:end_index]
with tempfile.NamedTemporaryFile("w+") as f:
self.write_docs_to_file(f, slice_docs, export_type, json_format, anonymize)
zip.write(f.name, f"project-{project_id}-{doc_type}-{slice_index:04d}.{export_type}")
# Stream file output
z.seek(0)
while True:
c = z.read(chunk_size)
if c:
yield c
else:
break
def write_docs_to_file(self, file, documents, export_type, json_format, anonymize):
if export_type == "json":
self.write_docs_as_json(file, documents, json_format, anonymize)
elif export_type == "jsonl":
self.write_docs_as_jsonl(file, documents, json_format, anonymize)
elif export_type == "csv":
self.write_docs_as_csv(file, documents, anonymize)
def write_docs_as_json(self, file, documents, json_format, anonymize):
doc_dict_list = []
for document in documents:
doc_dict_list.append(document.get_doc_annotation_dict(json_format, anonymize))
file.write(json.dumps(doc_dict_list))
file.flush()
def write_docs_as_jsonl(self, file, documents, json_format, anonymize):
for document in documents:
doc_dict = document.get_doc_annotation_dict(json_format, anonymize)
file.write(json.dumps(doc_dict) + "\n")
file.flush()
def write_docs_as_csv(self, file, documents, anonymize):
doc_dict_list = []
keys_list = []
for document in documents:
doc_dict_list.append(self.flatten_json(document.get_doc_annotation_dict("csv", anonymize), "."))
for doc_dict in doc_dict_list:
keys_list = self.insert_missing_key(keys_list, doc_dict)
writer = csv.writer(file, delimiter=",", quotechar='"')
# Header row
writer.writerow(keys_list)
# Data
for doc_dict in doc_dict_list:
row = []
for key in keys_list:
if key in doc_dict:
row.append(doc_dict[key])
else:
row.append(None)
writer.writerow(row)
file.flush()
def flatten_json(self, b, delim):
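        # Recursively flattens nested dicts/lists into a single-level dict whose
        # keys are the original paths joined by `delim`.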
val = {}
for i in b.keys():
if isinstance(b[i], dict):
get = self.flatten_json(b[i], delim)
for j in get.keys():
val[i + delim + j] = get[j]
elif isinstance(b[i], list):
for index, obj in enumerate(b[i]):
if isinstance(obj, dict):
get = self.flatten_json(obj, delim)
for j in get.keys():
val[i + delim + str(index) + delim + j] = get[j]
else:
val[i + delim + str(index)] = obj
else:
val[i] = b[i]
return val
def insert_missing_key(self, key_list, obj_dict):
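        # Merge obj_dict's keys into key_list, placing each new key beside the
        # neighbour it follows in obj_dict so related CSV columns stay adjacent.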
key_list = list(key_list)
key_set = set(key_list)
obj_keys = list(obj_dict.keys())
obj_key_set = set(obj_keys)
diff_set = obj_key_set.difference(key_set)
num_obj_keys = len(obj_keys)
# Do key filling in order
missing_keys_list = [key for key in obj_keys if key in diff_set]
for missing_key in missing_keys_list:
prev_key = None
next_key = None
for i, item in enumerate(obj_keys):
                if item == missing_key:
prev_key = obj_keys[i-1] if i > 0 else None
next_key = obj_keys[i+1] if i < num_obj_keys - 1 else None
break
if prev_key in key_set:
prev_key_index = key_list.index(prev_key)
key_list.insert(prev_key_index + 1, missing_key)
elif next_key in key_set:
next_key_index = key_list.index(next_key)
key_list.insert(next_key_index, missing_key)
else:
key_list.insert(-1, missing_key)
key_set = set(key_list)
return key_list
| 7,294 | 34.585366 | 164 | py |
gate-teamware | gate-teamware-master/backend/signals.py | from django.db.models.signals import pre_delete
from django.dispatch import receiver
from backend.models import ServiceUser, Annotation
| 137 | 26.6 | 50 | py |
gate-teamware | gate-teamware-master/backend/errors.py | class AuthError(PermissionError):
pass
| 43 | 13.666667 | 33 | py |
gate-teamware | gate-teamware-master/backend/rpcserver.py | import json
import logging
import inspect
from json.decoder import JSONDecodeError
from django.http import JsonResponse, HttpRequest
from django.views import View
from backend.errors import AuthError
log = logging.getLogger(__name__)
REGISTERED_RPC_METHODS = {}
PARSE_ERROR = -32700
INVALID_REQUEST = -32600
METHOD_NOT_FOUND = -32601
INVALID_PARAMS = -32602
INTERNAL_ERROR = -32603
AUTHENTICATION_ERROR = -32000
UNAUTHORIZED_ERROR = -32001
class RPCMethod:
def __init__(self, function, authenticate, requires_manager=False, requires_admin=False):
self.function = function
self.authenticate = authenticate
self.requires_manager = requires_manager
self.requires_admin = requires_admin
class JSONRPCEndpoint(View):
@staticmethod
def endpoint_listing():
endpoints_list = {}
for func_name, rmethod in REGISTERED_RPC_METHODS.items():
argspec = inspect.getfullargspec(rmethod.function)
args_list = []
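            # Skip the first positional argument: every RPC method receives the
            # Django request object there.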
if len(argspec.args) > 1:
args_list = argspec.args[1:]
endpoints_list[func_name] = {
"description": rmethod.function.__doc__,
"arguments": args_list,
"defaults": argspec.defaults,
"require_authentication": rmethod.authenticate,
"require_manager": rmethod.requires_manager,
"require_admin": rmethod.requires_admin
}
return endpoints_list
def success_response(self, data, msg_id=None, http_status=200):
context = {
"jsonrpc": "2.0",
"result": data
}
if msg_id is not None:
context["id"] = msg_id
return JsonResponse(context, status=http_status)
def error_response(self, code, message, msg_id=None, http_status=400):
context = {
"jsonrpc": "2.0",
"error":
{
"code": code,
"message": message,
}
}
if msg_id is not None:
context["id"] = msg_id
return JsonResponse(context, status=http_status)
def post(self, request: HttpRequest, *args, **kwargs):
msg_id = None
method_name = None
params = []
try:
# Parse message
msg = json.loads(request.body)
# Check id
if "id" in msg:
msg_id = msg["id"]
# Check protocol header
if "jsonrpc" not in msg or msg["jsonrpc"] != "2.0":
log.warning(f"No jsonrpc field in request")
return self.error_response(INVALID_REQUEST, "Not json rpc 2.0", msg_id, http_status=400)
# Get method name
if "method" in msg:
method_name = msg["method"]
if method_name not in REGISTERED_RPC_METHODS:
log.warning(f"No method name {method_name} in request")
return self.error_response(METHOD_NOT_FOUND, f"Method {method_name} was not found", http_status=405)
# Get params
if "params" in msg:
params = msg["params"]
# Get and call method
method = REGISTERED_RPC_METHODS[method_name]
# Check user role
if method.authenticate and not request.user.is_authenticated:
raise AuthError("Must be logged in to perform this operation.")
if method.requires_manager and not (request.user.is_manager or request.user.is_staff or request.user.is_superuser):
raise PermissionError("Must be a manager to perform this operation.")
if method.requires_admin and not (request.user.is_staff or request.user.is_superuser):
raise PermissionError("Must be a admin to perform this operation.")
result = method.function(request, *params)
log.info(f"Called {method_name}")
return self.success_response(result, msg_id)
except JSONDecodeError as e:
log.exception(f"Unable to parse json string from request body {request.body}")
return self.error_response(PARSE_ERROR, "Invalid JSON format in request")
except ValueError as e:
log.exception(f"Value error on rpc function {method_name}")
return self.error_response(INVALID_REQUEST, f"{e}", http_status=400)
except TypeError as e:
log.exception(f"Type error on rpc function {method_name}")
return self.error_response(INVALID_PARAMS, f"{e}", http_status=400)
except RuntimeError as e:
log.exception(f"Runtime error on rpc function {method_name}")
return self.error_response(INVALID_REQUEST, f"{e}", http_status=400)
except AuthError as e:
log.exception(f"Authentication failed trying to access {method_name}")
return self.error_response(AUTHENTICATION_ERROR, f"{e}", http_status=401)
except PermissionError as e:
log.exception(f"Not allowed to use rpc function {method_name}")
return self.error_response(UNAUTHORIZED_ERROR, f"Permission Denied: {e}", http_status=401)
except Exception as e:
log.exception(f"Unknown rpc exception on method {method_name}")
return self.error_response(INTERNAL_ERROR, f"Unknown error: {e}", http_status=500)
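# A minimal sketch of the JSON-RPC 2.0 envelopes this view consumes and produces
# ("login" is an example method from backend/rpc.py; the values are illustrative):
#
#   request:  {"jsonrpc": "2.0", "method": "login", "id": 1,
#              "params": [{"username": "alice", "password": "..."}]}
#   success:  {"jsonrpc": "2.0", "result": {"username": "alice", ...}, "id": 1}
#   error:    {"jsonrpc": "2.0", "error": {"code": -32600, "message": "..."}, "id": 1}
#
# Note that `params` is unpacked positionally (method.function(request, *params)),
# so each element of the list maps to one argument of the registered function.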
def rpc_method(func):
"""
    Used as a decorator. Registers the method in the list of available RPC functions.
    The decorated function can raise PermissionError or AuthError, which will be converted
    to the correct error code automatically.
"""
REGISTERED_RPC_METHODS[func.__name__] = RPCMethod(func, False)
return func
def rpc_method_auth(func):
"""
    Used as a decorator. Registers the method in the list of available RPC functions;
    an authentication check is performed automatically.
    The decorated function can raise PermissionError or AuthError, which will be converted
    to the correct error code automatically.
"""
REGISTERED_RPC_METHODS[func.__name__] = RPCMethod(func, True)
return func
def rpc_method_manager(func):
"""
    Used as a decorator. Registers the method in the list of available RPC functions;
    authentication and manager permission checks are performed automatically.
    The decorated function can raise PermissionError or AuthError, which will be converted
    to the correct error code automatically.
"""
REGISTERED_RPC_METHODS[func.__name__] = RPCMethod(func, True, requires_manager=True)
return func
def rpc_method_admin(func):
"""
    Used as a decorator. Registers the method in the list of available RPC functions;
    authentication and admin permission checks are performed automatically.
    The decorated function can raise PermissionError or AuthError, which will be converted
    to the correct error code automatically.
"""
REGISTERED_RPC_METHODS[func.__name__] = RPCMethod(func, True, requires_manager=True, requires_admin=True)
return func
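# A minimal sketch of how an endpoint is declared with the decorators above.
# `example_ping` is a hypothetical method name used purely for illustration and is
# not part of the Teamware API:
#
#   @rpc_method
#   def example_ping(request, message="pong"):
#       """Echoes the message back to the caller."""
#       return {"echo": message}
#
# A client would then POST:
#   {"jsonrpc": "2.0", "method": "example_ping", "params": ["hello"], "id": 7}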
| 7,035 | 32.826923 | 127 | py |
gate-teamware | gate-teamware-master/backend/rpc.py | import secrets
import logging
import datetime
import json
import os
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model, login as djlogin, logout as djlogout
from django.contrib.auth.decorators import permission_required
from django.contrib.admin.views.decorators import staff_member_required
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.models import manager
from django.core import mail
from django.db.models import Q
from django.http import JsonResponse, HttpRequest
from django.shortcuts import redirect, render
from django.template.loader import render_to_string
from django.utils import timezone
import gatenlp
from django.utils.html import strip_tags
from gatenlp import annotation_set
# https://pypi.org/project/gatenlp/
from backend.errors import AuthError
from backend.rpcserver import rpc_method, rpc_method_auth, rpc_method_manager, rpc_method_admin
from backend.models import Project, Document, DocumentType, Annotation, AnnotatorProject, AnnotationChangeHistory, \
UserDocumentFormatPreference, document_preference_str
from backend.utils.misc import get_value_from_key_path, insert_value_to_key_path, read_custom_document
from backend.utils.serialize import ModelSerializer
log = logging.getLogger(__name__)
serializer = ModelSerializer()
User = get_user_model()
#####################################
### Initialisation ###
#####################################
@rpc_method
def initialise(request):
"""
Provide the initial context information to initialise the Teamware app
context_object:
user:
isAuthenticated: bool
isManager: bool
isAdmin: bool
configs:
docFormatPref: bool
global_configs:
allowUserDelete: bool
"""
context_object = {
"user": is_authenticated(request),
"configs": {
"docFormatPref": get_user_document_pref_from_request(request)
},
"global_configs": {
"allowUserDelete": settings.ALLOW_USER_DELETE
}
}
return context_object
def get_user_document_pref_from_request(request):
if request.user.is_authenticated:
return document_preference_str(request.user.doc_format_pref)
else:
return document_preference_str(UserDocumentFormatPreference.JSON)
#####################################
### Login/Logout/Register Methods ###
#####################################
@rpc_method
def is_authenticated(request):
"""
Checks that the current user has logged in.
"""
context = {
"isAuthenticated": False,
"isManager": False,
"isAdmin": False,
}
if request.user.is_authenticated:
context["isAuthenticated"] = True
context["isActivated"] = request.user.is_activated
context["username"] = request.user.username
if not request.user.is_anonymous:
if request.user.is_manager or request.user.is_staff:
context["isManager"] = True
if request.user.is_staff:
context["isAdmin"] = True
return context
@rpc_method
def login(request, payload):
context = {}
if "username" not in payload:
raise RuntimeError("No username provided")
if "password" not in payload:
raise RuntimeError("No password provided")
user = authenticate(username=payload["username"], password=payload["password"])
if user is not None:
if user.is_deleted:
raise AuthError("Cannot login with a deleted account")
djlogin(request, user)
context["username"] = user.username
context["isAuthenticated"] = user.is_authenticated
context["isManager"] = user.is_manager or user.is_staff
context["isAdmin"] = user.is_staff
context["isActivated"] = user.is_activated
return context
else:
raise AuthError("Invalid username or password.")
@rpc_method
def logout(request):
djlogout(request)
return
@rpc_method
def register(request, payload):
context = {}
username = payload.get("username")
password = payload.get("password")
email = payload.get("email")
agreed_privacy_policy = True
if not get_user_model().objects.filter(username=username).exists():
user = get_user_model().objects.create_user(username=username, password=password, email=email, agreed_privacy_policy=agreed_privacy_policy)
_generate_user_activation(user)
djlogin(request, user)
context["username"] = payload["username"]
context["isAuthenticated"] = True
context["isActivated"] = user.is_activated
return context
else:
raise ValueError("Username already exists")
@rpc_method
def generate_user_activation(request, username):
try:
user = get_user_model().objects.get(username=username)
if user.is_activated:
raise ValueError(f"User {username}'s account is already activated.")
_generate_user_activation(user)
except User.DoesNotExist:
log.exception(f"Trying to generate activation code for user: {username} that doesn't exist")
raise ValueError("User does not exist.")
def _generate_user_activation(user):
if settings.ACTIVATION_WITH_EMAIL:
register_token = secrets.token_urlsafe(settings.ACTIVATION_TOKEN_LENGTH)
user.activate_account_token = register_token
user.activate_account_token_expire = timezone.now() + \
datetime.timedelta(days=settings.ACTIVATION_EMAIL_TIMEOUT_DAYS)
user.save()
app_name = settings.APP_NAME
activate_url_base = urljoin(settings.APP_URL, settings.ACTIVATION_URL_PATH)
activate_url = f"{activate_url_base}?username={user.username}&token={user.activate_account_token}"
context = {
"app_name": app_name,
"activate_url": activate_url,
}
message = render_to_string("registration_mail.html", context)
num_sent = mail.send_mail(subject=f"Activate your account at {app_name}",
message=strip_tags(message),
html_message=message,
from_email=settings.ADMIN_EMAIL,
recipient_list=[user.email],
fail_silently=False
)
if num_sent < 1:
log.warning(f"Could not send registration email for user {user.username}")
else:
user.is_account_activated = True
user.save()
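# For illustration, the activation link generated above has this shape (the hostname
# and token value are examples; the real values come from the APP_URL and
# ACTIVATION_URL_PATH settings):
#   https://teamware.example.org/activate?username=alice&token=<url-safe-token>
# The username and token are then passed back through activate_account() below.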
@rpc_method
def activate_account(request, username, token):
try:
if token is None or len(token) < settings.ACTIVATION_TOKEN_LENGTH:
log.error(f"Token of invalid length provided: {token} username: {username}")
raise ValueError("Invalid token provided")
user = get_user_model().objects.get(username=username, activate_account_token=token)
if user.activate_account_token_expire < timezone.now():
raise ValueError("Token has expired")
user.is_account_activated = True
user.activate_account_token = None
user.activate_account_token_expire = None
user.save()
except User.DoesNotExist as e:
log.exception(f"Activate account, invalid token provided: {token}")
raise ValueError("Invalid token provided")
@rpc_method
def generate_password_reset(request, username):
user = None
try:
user = get_user_model().objects.get(username=username)
register_token = secrets.token_urlsafe(settings.PASSWORD_RESET_TOKEN_LENGTH)
user.reset_password_token = register_token
user.reset_password_token_expire = timezone.now() + \
datetime.timedelta(hours=settings.PASSWORD_RESET_TIMEOUT_HOURS)
user.save()
app_name = settings.APP_NAME
reset_url_base = urljoin(settings.APP_URL, settings.PASSWORD_RESET_URL_PATH)
reset_url = f"{reset_url_base}?username={user.username}&token={user.reset_password_token}"
context = {
"app_name": app_name,
"reset_url": reset_url,
}
message = render_to_string("password_reset_mail.html", context)
num_sent = mail.send_mail(subject=f"Reset your password at {app_name}",
message=strip_tags(message),
html_message=message,
from_email=settings.ADMIN_EMAIL,
recipient_list=[user.email],
fail_silently=False
)
if num_sent < 1:
log.warning(f"Could not send password reset email for user {user.username}")
except User.DoesNotExist as e:
raise ValueError("Username does not exist.")
@rpc_method
def reset_password(request, username, token, new_password):
try:
if token is None or len(token) < settings.PASSWORD_RESET_TOKEN_LENGTH:
log.error(f"Token of invalid length provided: {token} username: {username}")
raise ValueError("Invalid token provided")
user = get_user_model().objects.get(username=username, reset_password_token=token)
if user.reset_password_token_expire < timezone.now():
raise ValueError("Token has expired")
user.set_password(new_password)
user.reset_password_token = None
user.reset_password_token_expire = None
user.save()
except User.DoesNotExist as e:
log.exception(f"Reset password, invalid token provided: {token}")
raise ValueError("Invalid token provided")
@rpc_method_auth
def change_password(request, payload):
user = request.user
user.set_password(payload.get("password"))
user.save()
return
@rpc_method_auth
def change_email(request, payload):
user = request.user
user.email = payload.get("email")
    user.is_account_activated = False  # User needs to verify their new e-mail address
user.save()
    _generate_user_activation(user)  # Generate a new activation token and send the verification e-mail
return
@rpc_method_auth
def set_user_receive_mail_notifications(request, do_receive_notifications):
user = request.user
user.receive_mail_notifications = do_receive_notifications
user.save()
@rpc_method_auth
def set_user_document_format_preference(request, doc_preference):
user = request.user
# Convert to enum value
if doc_preference == "JSON":
user.doc_format_pref = UserDocumentFormatPreference.JSON
elif doc_preference == "CSV":
user.doc_format_pref = UserDocumentFormatPreference.CSV
else:
raise ValueError(f"Document preference value {doc_preference} is invalid")
user.save()
#############################
### User specific methods ###
#############################
@rpc_method_auth
def get_user_details(request):
user = request.user
data = {
"username": user.username,
"email": user.email,
"created": user.created,
"receive_mail_notifications": user.receive_mail_notifications,
}
user_role = "annotator"
if user.is_staff:
user_role = "admin"
elif user.is_manager:
user_role = "manager"
data["user_role"] = user_role
# Convert doc preference to string
data["doc_format_pref"] = document_preference_str(user.doc_format_pref)
return data
@rpc_method_auth
def get_user_annotated_projects(request):
"""
Gets a list of projects that the user has annotated
"""
user = request.user
projects_list = []
for project in Project.objects.filter(documents__annotations__user_id=user.pk).distinct().order_by("-id"):
projects_list.append({
"id": project.pk,
"name": project.name,
"allow_annotation_change": project.allow_annotation_change,
"configuration": project.configuration,
})
return projects_list
@rpc_method_auth
def get_user_annotations_in_project(request, project_id, current_page=1, page_size=None):
"""
    Gets a list of documents in a project that the user has annotated.
:param project_id: The id of the project to query
:param current_page: A 1-indexed page count
:param page_size: The maximum number of items to return per query
:returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
"""
user = request.user
if project_id is None:
raise Exception("Must have project_id")
if current_page < 1:
raise Exception("Page must start from 1")
current_page = current_page - 1 # Change to zero index
project = Project.objects.get(pk=project_id)
user_annotated_docs = project.documents.filter(doc_type=DocumentType.ANNOTATION,
annotations__user_id=user.pk).distinct()
total_count = user_annotated_docs.count()
    if total_count < 1:
raise Exception(f"No annotations in this project {project.pk}:{project.name}")
if page_size is not None:
start_index = current_page * page_size
end_index = (current_page + 1) * page_size
paginated_docs = user_annotated_docs[start_index:end_index]
else:
paginated_docs = user_annotated_docs
documents_out = []
for document in paginated_docs:
annotations_list = [annotation.get_listing() for annotation in document.annotations.filter(user=user)]
documents_out.append(document.get_listing(annotations_list))
return {"items": documents_out, "total_count": total_count}
@rpc_method_auth
def user_delete_personal_information(request):
request.user.delete_user_personal_information()
@rpc_method_auth
def user_delete_account(request):
if settings.ALLOW_USER_DELETE:
request.user.delete()
else:
raise Exception("Teamware's current configuration does not allow user accounts to be deleted.")
##################################
### Project Management Methods ###
##################################
@rpc_method_manager
def create_project(request):
with transaction.atomic():
proj = Project.objects.create()
proj.owner = request.user
proj.save()
serialized_project = serializer.serialize(proj, exclude_fields=set(["annotators", "annotatorproject"]))
serialized_project["annotators"] = get_project_annotators(request, proj.id)
return serialized_project
@rpc_method_manager
def delete_project(request, project_id):
with transaction.atomic():
proj = Project.objects.get(pk=project_id)
proj.delete()
return True
@rpc_method_manager
def update_project(request, project_dict):
with transaction.atomic():
project = serializer.deserialize(Project, project_dict, exclude_fields=set(["annotators", "annotatorproject"]))
return True
@rpc_method_manager
def get_project(request, project_id):
proj = Project.objects.get(pk=project_id)
out_proj = {
**serializer.serialize(proj, exclude_fields=set(["annotators", "annotatorproject"])),
**proj.get_annotators_dict(),
**proj.get_project_stats()
}
return out_proj
@rpc_method_manager
def clone_project(request, project_id):
with transaction.atomic():
current_project = Project.objects.get(pk=project_id)
new_project = current_project.clone(owner=request.user)
return serializer.serialize(new_project, exclude_fields=set(["annotators", "annotatorproject"]))
@rpc_method_manager
def import_project_config(request, pk, project_dict):
with transaction.atomic():
serializer.deserialize(Project, {
"id": pk,
**project_dict
}, Project.get_project_export_field_names())
@rpc_method_manager
def export_project_config(request, pk):
proj = Project.objects.get(pk=pk)
return serializer.serialize(proj, Project.get_project_export_field_names())
@rpc_method_manager
def get_projects(request, current_page=1, page_size=None, filters=None):
"""
    Gets the list of projects. Query results can be limited using current_page and page_size
    and narrowed using filters.
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter option used to search projects; currently only a string is supported,
        which searches the project title
:returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
"""
if current_page < 1:
raise Exception("Page index starts from 1")
current_page = current_page - 1 # Change to 0 index for query
projects_query = None
total_count = 0
# Perform filtering
if isinstance(filters, str):
        # Search the project title only if the filter is a string
projects_query = Project.objects.filter(name__contains=filters.strip())
total_count = projects_query.count()
else:
projects_query = Project.objects.all()
total_count = projects_query.count()
# Perform pagination
if current_page is None or page_size is None or current_page*page_size >= total_count:
        # Return all results if the limits are None or current_page goes past the last item
projects = projects_query
else:
start_index = current_page*page_size
end_index = (current_page+1)*page_size
projects = projects_query[start_index:end_index]
# Serialize
output_projects = []
for proj in projects:
out_proj = {
**serializer.serialize(proj, {"id", "name", "created"}),
**proj.get_annotators_dict(),
**proj.get_project_stats()
}
output_projects.append(out_proj)
return {"items": output_projects, "total_count": total_count}
def _get_project_documents(project_id, current_page=1, page_size=None, filters=None, doc_type=DocumentType.ANNOTATION):
"""
    Gets the list of documents and their annotations. Query results can be limited using
    current_page and page_size and narrowed using filters.
    :param project_id: The id of the project that the documents belong to, is a required variable
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter currently only searches for the ID of documents
:param doc_type: Integer enum representation of document type Document.[ANNOTATION, TRAINING, TEST]
:returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
"""
if project_id is None:
raise Exception("project_id must be provided in the options")
if current_page < 1:
raise Exception("Page index starts from 1")
current_page = current_page - 1 # Change to 0 index for query
project = Project.objects.get(pk=project_id)
documents_query = None
total_count = 0
# Filter
if isinstance(filters, str):
# Search for id
documents_query = project.documents.filter(pk=filters.strip(), doc_type=doc_type)
total_count = documents_query.count()
else:
documents_query = project.documents.filter(doc_type=doc_type).all()
total_count = documents_query.count()
# Paginate
if current_page is None or page_size is None or current_page*page_size >= total_count:
documents = documents_query.all()
else:
start_index = current_page * page_size
end_index = (current_page + 1) * page_size
documents = documents_query[start_index:end_index]
# Serialize
documents_out = []
for document in documents:
annotations_list = [a.get_listing() for a in document.annotations.all()]
documents_out.append(document.get_listing(annotations_list))
return {"items": documents_out, "total_count": total_count}
@rpc_method_manager
def get_project_documents(request, project_id, current_page=1, page_size=None, filters=None):
"""
    Gets the list of documents and their annotations. Query results can be limited using
    current_page and page_size and narrowed using filters.
    :param project_id: The id of the project that the documents belong to, is a required variable
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter currently only searches for the ID of documents
:returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
"""
return _get_project_documents(project_id, current_page, page_size, filters, DocumentType.ANNOTATION)
@rpc_method_manager
def get_project_test_documents(request, project_id, current_page=1, page_size=None, filters=None):
"""
    Gets the list of documents and their annotations. Query results can be limited using
    current_page and page_size and narrowed using filters.
    :param project_id: The id of the project that the documents belong to, is a required variable
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter currently only searches for the ID of documents
:returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
"""
return _get_project_documents(project_id, current_page, page_size, filters, DocumentType.TEST)
@rpc_method_manager
def get_project_training_documents(request, project_id, current_page=1, page_size=None, filters=None):
"""
    Gets the list of documents and their annotations. Query results can be limited using
    current_page and page_size and narrowed using filters.
    :param project_id: The id of the project that the documents belong to, is a required variable
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter currently only searches for the ID of documents
:returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
"""
return _get_project_documents(project_id, current_page, page_size, filters, DocumentType.TRAINING)
def _add_project_document(project_id, document_data, doc_type=DocumentType.ANNOTATION):
project = Project.objects.get(pk=project_id)
document = Document.objects.create(project=project, doc_type=doc_type)
document.data = document_data
document.save()
return document.pk
@rpc_method_manager
def add_project_document(request, project_id, document_data):
with transaction.atomic():
return _add_project_document(project_id, document_data=document_data, doc_type=DocumentType.ANNOTATION)
@rpc_method_manager
def add_project_test_document(request, project_id, document_data):
with transaction.atomic():
return _add_project_document(project_id, document_data=document_data, doc_type=DocumentType.TEST)
@rpc_method_manager
def add_project_training_document(request, project_id, document_data):
with transaction.atomic():
return _add_project_document(project_id, document_data=document_data, doc_type=DocumentType.TRAINING)
@rpc_method_manager
def add_document_annotation(request, doc_id, annotation_data):
with transaction.atomic():
document = Document.objects.get(pk=doc_id)
annotation = Annotation.objects.create(document=document, user=request.user)
annotation.data = annotation_data
return annotation.pk
@rpc_method_manager
def get_annotations(request, project_id):
"""
Serialize project annotations as GATENLP format JSON using the python-gatenlp interface.
"""
project = Project.objects.get(pk=project_id)
annotations = []
for document in project.documents.all():
# create a GateNLP Document instance
doc = gatenlp.Document(text=document.data['text'])
doc.name = str(document.pk)
for annotation in document.annotations.all():
            # add an annotation set named after the annotating user
annset = doc.annset(name=annotation.user.username)
# add the annotation to the annotation set
annset.add(start=0,
end=len(document.data['text']),
anntype="Document",
features=dict(label=annotation.data, _id=annotation.pk),
)
# For each document, append the annotations
annotations.append(doc.save_mem(fmt="bdocjs"))
return annotations
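# A minimal sketch of consuming the export above with python-gatenlp, assuming that
# Document.load_mem() mirrors the save_mem() call used here (check the gatenlp docs
# for the exact signature in your version):
#
#   from gatenlp import Document
#   for serialized in get_annotations(request, project_id):
#       doc = Document.load_mem(serialized, fmt="bdocjs")
#       for annset_name in doc.annset_names():
#           print(doc.name, annset_name, len(doc.annset(annset_name)))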
@rpc_method_manager
def delete_documents_and_annotations(request, doc_id_ary, anno_id_ary):
for anno_id in anno_id_ary:
Annotation.objects.filter(pk=anno_id).delete()
for doc_id in doc_id_ary:
Document.objects.filter(pk=doc_id).delete()
return True
@rpc_method_manager
def get_possible_annotators(request, proj_id):
project = Project.objects.get(pk=proj_id)
    # get a list of IDs of annotators that are currently active in any project
active_annotators = User.objects.filter(annotatorproject__status=AnnotatorProject.ACTIVE).values_list('id', flat=True)
project_annotators = project.annotators.all().values_list('id', flat=True)
    # Do an exclude filter to remove annotators with those ids
valid_annotators = User.objects.filter(is_deleted=False).exclude(id__in=active_annotators).exclude(id__in=project_annotators)
output = [serializer.serialize(annotator, {"id", "username", "email"}) for annotator in valid_annotators]
return output
@rpc_method_manager
def get_project_annotators(request, proj_id):
project_annotators = AnnotatorProject.objects.filter(project_id=proj_id)
output = []
for ap in project_annotators:
output.append({
**serializer.serialize(ap.annotator, {"id", "username", "email"}),
**serializer.serialize(ap, exclude_fields={"annotator", "project"}),
**ap.get_stats()
})
return output
@rpc_method_manager
def add_project_annotator(request, proj_id, username):
with transaction.atomic():
annotator = User.objects.get(username=username)
project = Project.objects.get(pk=proj_id)
project.add_annotator(annotator)
project.save()
return True
@rpc_method_manager
def make_project_annotator_active(request, proj_id, username):
with transaction.atomic():
annotator = User.objects.get(username=username)
project = Project.objects.get(pk=proj_id)
project.make_annotator_active(annotator)
return True
@rpc_method_manager
def project_annotator_allow_annotation(request, proj_id, username):
with transaction.atomic():
annotator = User.objects.get(username=username)
project = Project.objects.get(pk=proj_id)
project.annotator_set_allowed_to_annotate(annotator)
@rpc_method_manager
def remove_project_annotator(request, proj_id, username):
with transaction.atomic():
annotator = User.objects.get(username=username)
project = Project.objects.get(pk=proj_id)
project.remove_annotator(annotator)
project.save()
return True
@rpc_method_manager
def reject_project_annotator(request, proj_id, username):
with transaction.atomic():
annotator = User.objects.get(username=username)
project = Project.objects.get(pk=proj_id)
project.reject_annotator(annotator)
project.save()
return True
@rpc_method_manager
def get_annotation_timings(request, proj_id):
project = Project.objects.get(pk=proj_id)
annotation_timings = []
documents = project.documents.select_related("project").all()
for document in documents:
for annotation in document.annotations.all():
if annotation.time_to_complete:
data_point = {'x': annotation.time_to_complete, 'y': 0}
annotation_timings.append(data_point)
return annotation_timings
@rpc_method_manager
def delete_annotation_change_history(request, annotation_change_history_id):
annotation_change_history = AnnotationChangeHistory.objects.get(pk=annotation_change_history_id)
if request.user.is_associated_with_annotation(annotation_change_history.annotation):
if annotation_change_history.annotation.change_history.all().count() > 1:
annotation_change_history.delete()
else:
raise RuntimeError("Must have at least a single annotation change history for a completed annotation.")
else:
raise PermissionError("No permission to access the annotation history")
###############################
### Annotator methods ###
###############################
@rpc_method_auth
def get_annotation_task(request):
"""
Gets the annotator's current task, returns a dictionary about the annotation task that contains all the information
needed to render the Annotate view.
"""
with transaction.atomic():
# Times out any pending annotation
Annotation.check_for_timed_out_annotations()
        # Gets the project the user is associated with
user = request.user
project = user.annotates.filter(annotatorproject__status=AnnotatorProject.ACTIVE).distinct().first()
# No project to annotate
if not project:
return None
# Gets the annotation task or None
return project.get_annotator_task(user)
@rpc_method_auth
def get_annotation_task_with_id(request, annotation_id):
"""
    Get the annotation task dictionary for a specific annotation_id; the annotation must belong to the annotator (or the requester must be a manager or above).
"""
with transaction.atomic():
user = request.user
annotation = Annotation.objects.get(pk=annotation_id)
if not annotation.user_allowed_to_annotate(user):
raise PermissionError(
f"User {user.username} trying to complete annotation id {annotation_id} that doesn't belong to them")
if annotation.document and annotation.document.project:
return annotation.document.project.get_annotation_task_dict(annotation,
include_task_history_in_project=False)
else:
raise RuntimeError(f"Could not get the annotation task with id {annotation_id}")
@rpc_method_auth
def complete_annotation_task(request, annotation_id, annotation_data, elapsed_time=None):
"""
Complete the annotator's current task
"""
with transaction.atomic():
        # Gets the annotation and checks that it belongs to the user
user = request.user
annotation = Annotation.objects.get(pk=annotation_id)
if not annotation.user_allowed_to_annotate(user):
raise PermissionError(
f"User {user.username} trying to complete annotation id {annotation_id} that doesn't belong to them")
if annotation:
annotation.complete_annotation(annotation_data, elapsed_time)
@rpc_method_auth
def reject_annotation_task(request, annotation_id):
"""
Reject the annotator's current task
"""
with transaction.atomic():
        # Gets the annotation and checks that it belongs to the user
user = request.user
annotation = Annotation.objects.get(pk=annotation_id)
if not annotation.user_allowed_to_annotate(user):
raise PermissionError(
f"User {user.username} trying to complete annotation id {annotation_id} that doesn't belong to them")
if annotation:
annotation.reject_annotation()
@rpc_method_auth
def change_annotation(request, annotation_id, new_data):
"""Adds annotation data to history"""
try:
annotation = Annotation.objects.get(pk=annotation_id)
        if annotation.document.doc_type != DocumentType.ANNOTATION:
            raise RuntimeError("It is not possible to change annotations created for test or training documents.")
if annotation.user_allowed_to_annotate(request.user) or request.user.is_manager_or_above():
annotation.change_annotation(new_data, request.user)
except Annotation.DoesNotExist:
raise RuntimeError(f"Annotation with ID {annotation_id} does not exist")
@rpc_method_auth
def get_document(request, document_id):
""" Obsolete: to be deleted"""
doc = Document.objects.get(pk=document_id)
if request.user.is_associated_with_document(doc):
return doc.get_listing(annotation_list=[anno.get_listing() for anno in doc.annotations.all()])
else:
raise PermissionError("No permission to access the document")
@rpc_method_auth
def get_annotation(request, annotation_id):
""" Obsolete: to be deleted"""
annotation = Annotation.objects.get(pk=annotation_id)
if request.user.is_associated_with_annotation(annotation):
return annotation.get_listing()
else:
raise PermissionError("No permission to access the annotation")
@rpc_method_auth
def annotator_leave_project(request):
""" Allow annotator to leave their currently associated project. """
user = request.user
project = user.active_project
if project is None:
raise Exception("No current active project")
project.remove_annotator(get_user_model().objects.get(pk=user.id))
###############################
### User Management Methods ###
###############################
@rpc_method_admin
def get_all_users(request):
users = User.objects.all()
output = [serializer.serialize(user, {"id", "username", "email", "is_manager", "is_staff"}) for user in users]
return output
@rpc_method_admin
def get_user(request, username):
user = User.objects.get(username=username)
data = {
"id": user.id,
"username": user.username,
"email": user.email,
"is_manager": user.is_manager,
"is_admin": user.is_staff,
"is_activated": user.is_activated
}
return data
@rpc_method_admin
def admin_update_user(request, user_dict):
user = User.objects.get(id=user_dict["id"])
user.username = user_dict["username"]
user.email = user_dict["email"]
user.is_manager = user_dict["is_manager"]
user.is_staff = user_dict["is_admin"]
user.is_account_activated = user_dict["is_activated"]
user.save()
return user_dict
@rpc_method_admin
def admin_update_user_password(request, username, password):
user = User.objects.get(username=username)
user.set_password(password)
user.save()
@rpc_method_admin
def admin_delete_user_personal_information(request, username):
user = User.objects.get(username=username)
user.delete_user_personal_information()
@rpc_method_admin
def admin_delete_user(request, username):
if settings.ALLOW_USER_DELETE:
user = User.objects.get(username=username)
user.delete()
else:
raise Exception("Teamware's current configuration does not allow the deleting of users")
##################################
### Privacy Policy/T&C Methods ###
##################################
@rpc_method
def get_privacy_policy_details(request):
details = settings.PRIVACY_POLICY
custom_docs = {
'CUSTOM_PP_DOCUMENT': read_custom_document(settings.CUSTOM_PP_DOCUMENT_PATH) if os.path.isfile(
settings.CUSTOM_PP_DOCUMENT_PATH) else None,
'CUSTOM_TC_DOCUMENT': read_custom_document(settings.CUSTOM_TC_DOCUMENT_PATH) if os.path.isfile(
settings.CUSTOM_TC_DOCUMENT_PATH) else None
}
details.update(custom_docs)
url = {
'URL': request.headers['Host']
}
details.update(url)
return details
###############################
### Utility Methods ###
###############################
@rpc_method
def get_endpoint_listing(request):
from .rpcserver import JSONRPCEndpoint
return JSONRPCEndpoint.endpoint_listing()
| 36,144 | 33.754808 | 147 | py |
gate-teamware | gate-teamware-master/backend/admin.py | from django.contrib import admin
from django.contrib.auth import get_user_model
from .models import Project, Document, Annotation
# Register your models here.
@admin.register(get_user_model())
class UserAdmin(admin.ModelAdmin):
pass
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
pass
@admin.register(Document)
class DocumentAdmin(admin.ModelAdmin):
pass
@admin.register(Annotation)
class AnnotationAdmin(admin.ModelAdmin):
pass
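# These registrations accept the Django admin defaults. A minimal sketch of how one
# could be fleshed out (the list_display fields are assumptions based on the Project
# model, not part of this file):
#
#   @admin.register(Project)
#   class ProjectAdmin(admin.ModelAdmin):
#       list_display = ("name", "owner", "created")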
| 466 | 20.227273 | 49 | py |
gate-teamware | gate-teamware-master/backend/models.py | import math
import uuid
from django.conf import settings
import logging
import django
from datetime import timedelta
from django.contrib.auth.models import AbstractUser
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import timezone
from django.db.models import Q, F, Count
from backend.utils.misc import get_value_from_key_path, insert_value_to_key_path, generate_random_string
from backend.utils.telemetry import TelemetrySender
log = logging.getLogger(__name__)
class UserDocumentFormatPreference:
JSON = 0
CSV = 1
USER_DOC_FORMAT_PREF = (
(JSON, 'JSON'),
(CSV, 'CSV')
)
def document_preference_str(pref: int) -> str:
if pref == UserDocumentFormatPreference.JSON:
return "JSON"
else:
return "CSV"
class DocumentType:
ANNOTATION = 0
TRAINING = 1
TEST = 2
DOCUMENT_TYPE = (
(ANNOTATION, 'Annotation'),
(TRAINING, 'Training'),
(TEST, 'Test')
)
class ServiceUser(AbstractUser):
"""
Custom user class.
"""
is_manager = models.BooleanField(default=False)
created = models.DateTimeField(default=timezone.now)
is_account_activated = models.BooleanField(default=False)
activate_account_token = models.TextField(null=True, blank=True)
activate_account_token_expire = models.DateTimeField(null=True, blank=True)
reset_password_token = models.TextField(null=True, blank=True)
reset_password_token_expire = models.DateTimeField(null=True, blank=True)
receive_mail_notifications = models.BooleanField(default=True)
doc_format_pref = models.IntegerField(choices=UserDocumentFormatPreference.USER_DOC_FORMAT_PREF,
default=UserDocumentFormatPreference.JSON)
agreed_privacy_policy = models.BooleanField(default=False)
is_deleted = models.BooleanField(default=False)
@property
def has_active_project(self):
return self.annotatorproject_set.filter(status=AnnotatorProject.ACTIVE).count() > 0
@property
def active_project(self):
"""
Gets the project that user's currently active in
:returns: Project object that user's active in, None if not active in any project
"""
active_annotator_project = self.annotatorproject_set.filter(status=AnnotatorProject.ACTIVE).first()
if active_annotator_project:
return active_annotator_project.project
return None
@property
def is_activated(self):
"""
        Checks whether the user has activated their account, taking into account
        the ACTIVATION_WITH_EMAIL setting.
"""
if settings.ACTIVATION_WITH_EMAIL:
return self.is_account_activated
else:
return True
@is_activated.setter
def is_activated(self, value):
self.is_account_activated = value
def is_associated_with_document(self, document):
if self.is_manager or self.is_staff or self.is_superuser:
return True
if self.annotations.filter(document_id=document.pk).count() > 0:
return True
if self.annotates:
if not self.annotates.filter(pk=document.project.pk).first():
return False
if self.annotates.filter(pk=document.project.pk).first().documents.count() > 0:
return True
else:
# If user is no longer active on a project, but has annotations from that project, this should have been caught above
return False
def is_associated_with_annotation(self, annotation):
if self.is_manager or self.is_staff or self.is_superuser:
return True
return self.annotations.filter(pk=annotation.pk).count() > 0
def is_manager_or_above(self):
if self.is_manager or self.is_staff or self.is_superuser:
return True
else:
return False
def clear_pending_annotations(self) -> None:
"""
        Clear all of the user's pending annotations in the system to allow other annotators
        to take up the task slots.
"""
pending_annotations = self.annotations.filter(status=Annotation.PENDING)
pending_annotations.delete()
def delete_user_personal_information(self) -> None:
"""
Replace user's personal data with placeholder
"""
self.is_deleted = True
retry_limit = 1000
retry_counter = 0
while retry_counter < retry_limit:
random_suffix = generate_random_string(settings.DELETED_USER_USERNAME_HASH_LENGTH)
deleted_username = f"{settings.DELETED_USER_USERNAME_PREFIX}_{random_suffix}"
if not get_user_model().objects.filter(username=deleted_username).exists():
break
retry_counter += 1
if retry_counter >= retry_limit:
raise Exception("Could not delete user, reached hash generation retries limit")
self.username = deleted_username
self.first_name = settings.DELETED_USER_FIRSTNAME
self.last_name = settings.DELETED_USER_LASTNAME
self.email = f"{self.username}@{settings.DELETED_USER_EMAIL_DOMAIN}"
self.save()
# Also clear all pending annotations
self.clear_pending_annotations()
def default_document_input_preview():
return {"text": "<p>Some html text <strong>in bold</strong>.</p><p>Paragraph 2.</p>"}
class Project(models.Model):
"""
Model to store annotation projects.
"""
name = models.TextField(default="New project")
uuid = models.UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
description = models.TextField(default="")
annotator_guideline = models.TextField(default="")
created = models.DateTimeField(default=timezone.now)
configuration = models.JSONField(default=list)
owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, related_name="owns")
annotations_per_doc = models.IntegerField(default=3)
annotator_max_annotation = models.FloatField(default=0.6)
# Allow annotators to reject document
allow_document_reject = models.BooleanField(default=True)
# Allow annotators to change their annotation after it's been submitted
allow_annotation_change = models.BooleanField(default=True)
# Time it takes for user annotation to timeout (minutes)
annotation_timeout = models.IntegerField(default=60)
# Stores a document that's used for previewing in the AnnotationRenderer
document_input_preview = models.JSONField(default=default_document_input_preview)
# Stores a csv document that's used for previewing in the AnnotationRenderer
document_input_preview_csv = models.TextField(default="")
document_id_field = models.TextField(default="name")
annotators = models.ManyToManyField(get_user_model(), through='AnnotatorProject', related_name="annotates")
has_training_stage = models.BooleanField(default=False)
has_test_stage = models.BooleanField(default=False)
can_annotate_after_passing_training_and_test = models.BooleanField(default=True)
min_test_pass_threshold = models.FloatField(default=1.0, null=True)
document_gold_standard_field = models.TextField(default="gold")
document_pre_annotation_field = models.TextField(default="")
@classmethod
def get_project_config_fields(cls, exclude_fields: set = set()):
exclude_field_types = {
models.ManyToOneRel,
models.ManyToManyField,
models.ManyToManyRel,
}
fields = Project._meta.get_fields()
config_fields = []
for field in fields:
if field.__class__ not in exclude_field_types and field.name not in exclude_fields:
config_fields.append(field)
return config_fields
@classmethod
def get_project_export_field_names(cls):
fields = Project.get_project_config_fields({"owner", "id", "created", "uuid"})
return [field.name for field in fields]
def clone(self, new_name=None, clone_name_prefix="Copy of ", owner=None):
"""
Clones the Project object, does not retain documents and annotator membership
"""
exclude_fields = {"name", "owner", "id", "created", "uuid"}
# Setting project name
new_project_name = new_name if new_name is not None else ""
if clone_name_prefix:
new_project_name = clone_name_prefix + self.name
new_project = Project.objects.create(name=new_project_name)
# Setting owner
new_project.owner = owner
# Copy all config over
config_fields = self.get_project_config_fields(exclude_fields)
for field in config_fields:
setattr(new_project, field.name, getattr(self, field.name))
new_project.save()
return new_project
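    # A minimal usage sketch (hypothetical caller): cloning keeps the configuration
    # but not documents or annotators, and prefixes the name by default:
    #   copy = project.clone(owner=request.user)  # name becomes "Copy of <original name>"
    #   fresh = project.clone(new_name="Round 2", clone_name_prefix=None, owner=request.user)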
@property
def num_documents(self):
return self.documents.filter(doc_type=DocumentType.ANNOTATION).count()
@property
def num_test_documents(self):
return self.documents.filter(doc_type=DocumentType.TEST).count()
@property
def num_training_documents(self):
return self.documents.filter(doc_type=DocumentType.TRAINING).count()
@property
def num_annotation_tasks_total(self):
return self.num_documents * self.annotations_per_doc
@property
def num_completed_tasks(self):
return self._get_project_annotations_query(status=Annotation.COMPLETED).count()
@property
def num_pending_tasks(self):
return self._get_project_annotations_query(status=Annotation.PENDING).count()
@property
def num_rejected_tasks(self):
return self._get_project_annotations_query(status=Annotation.REJECTED).count()
@property
def num_timed_out_tasks(self):
return self._get_project_annotations_query(status=Annotation.TIMED_OUT).count()
@property
def num_aborted_tasks(self):
return Annotation.objects.filter(document__project_id=self.pk,
status=Annotation.ABORTED,
document__doc_type=DocumentType.ANNOTATION).count()
@property
def num_occupied_tasks(self):
return (self._get_project_annotations_query(Annotation.COMPLETED) |
self._get_project_annotations_query(Annotation.PENDING)).count()
@property
def num_annotation_tasks_remaining(self):
return self.num_annotation_tasks_total - self.num_occupied_tasks
def _get_project_annotations_query(self, status=None, doc_type=DocumentType.ANNOTATION):
if status is None:
return Annotation.objects.filter(document__project_id=self.pk,
document__doc_type=doc_type)
else:
return Annotation.objects.filter(document__project_id=self.pk,
status=status,
document__doc_type=doc_type)
@property
def is_completed(self):
# Project must have documents to be completed
if self.num_annotation_tasks_total <= 0:
return False
return self.num_annotation_tasks_total - self.num_completed_tasks < 1
@property
def max_num_task_per_annotator(self):
return math.ceil(
self.annotator_max_annotation * self.documents.filter(doc_type=DocumentType.ANNOTATION).count())
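    # Worked example for the quota above: with annotator_max_annotation = 0.6 and
    # 7 annotation documents, each annotator may hold at most ceil(0.6 * 7) = 5 tasks.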
@property
def num_annotators(self):
return self.annotators.filter(annotatorproject__status=AnnotatorProject.ACTIVE).count()
@property
def num_all_annotators(self) -> int:
"""Count of all annotators associated with project."""
return self.annotators.filter().count()
@property
def is_project_configured(self):
return len(self.configuration) > 0 and self.num_documents > 0
@property
def project_configuration_error_message(self):
errors = []
if len(self.configuration) < 1:
errors.append("No annotation widgets defined in the configuration")
if self.num_documents < 1:
errors.append("No documents to annotate")
return errors
def delete(self):
"""
        Overridden delete method to optionally send project telemetry stats prior to deletion.
"""
try:
if settings.TELEMETRY_ON and self.num_all_annotators > 0:
self.send_telemetry("deleted")
finally:
super().delete()
def add_annotator(self, user):
try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
except ObjectDoesNotExist:
allowed_to_annotate = not self.has_test_stage and not self.has_training_stage
annotator_project = AnnotatorProject.objects.create(annotator=user,
project=self,
status=AnnotatorProject.ACTIVE,
allowed_to_annotate=allowed_to_annotate)
return annotator_project
def make_annotator_active(self, user):
"""
        Makes the user active in the project again. A user can become inactive in a project by
        completing all annotation tasks, by a manager marking them as having completed the project,
        by being rejected from the project, or by leaving the project themselves.
"""
# Check that user is not active in another project
active_project = user.active_project
if active_project == self:
raise Exception("User already active in this project")
if active_project is not None:
raise Exception(f"User is already active in project {active_project.name}")
if self.annotator_reached_quota(user):
raise Exception(f"User is already reached annotation quota")
try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.status = AnnotatorProject.ACTIVE
annotator_project.rejected = False
annotator_project.save()
except ObjectDoesNotExist:
raise Exception("User must be added to the project before they can be made active.")
    def annotator_completed_training(self, user, finished_time=None):
        # Resolve the timestamp at call time; a timezone.now() default argument is
        # evaluated only once, when the class is defined.
        if finished_time is None:
            finished_time = timezone.now()
        try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.training_completed = finished_time
annotator_project.training_score = self.get_annotator_document_score(user, DocumentType.TRAINING)
if annotator_project.project.can_annotate_after_passing_training_and_test and not annotator_project.project.has_test_stage:
annotator_project.allowed_to_annotate = True
annotator_project.save()
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
def get_annotator_document_score(self, user, doc_type):
test_docs = self.documents.filter(doc_type=doc_type)
score = 0
for document in test_docs:
# Checks answers for all test documents
user_annotations = document.annotations.filter(user_id=user.pk)
if user_annotations.count() > 1:
# User should not have more than 1 annotation per document
raise Exception(f"User {user.username} has more than 1 annotation in document")
annotation = user_annotations.first()
# Skip if there's no annotation
if not annotation:
continue
# Check that answer key exists in document
answers = get_value_from_key_path(document.data, self.document_gold_standard_field)
if answers is None:
raise Exception(f"No gold standard (answer) field inside test document")
if self.check_annotation_answer(annotation.data, answers):
score += 1
return score
def check_annotation_answer(self, annotation_data, answers):
"""
        Compare the annotation data against the answers in the document's gold standard field
"""
is_correct = True
for label in answers:
if label not in annotation_data:
return False # Label does not exist in annotation
annotation_val = annotation_data[label]
answer_val = answers[label]["value"]
if isinstance(annotation_val, str) and isinstance(answer_val, str):
if annotation_val != answer_val:
is_correct = False
elif isinstance(annotation_val, list) and isinstance(answer_val, list):
comparison_set = set(annotation_val) & set(answer_val)
if len(answer_val) != len(annotation_val) or len(comparison_set) != len(answer_val):
is_correct = False
else:
is_correct = False
return is_correct
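    # Worked example for the comparison above (shapes taken from this method; the
    # label names are illustrative). With a gold standard of
    #   answers = {"sentiment": {"value": "positive"}, "topics": {"value": ["a", "b"]}}
    # an annotation must match every label:
    #   {"sentiment": "positive", "topics": ["b", "a"]}  -> correct (order is ignored)
    #   {"sentiment": "negative", "topics": ["a", "b"]}  -> incorrect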
    def annotator_completed_test(self, user, finished_time=None):
        if finished_time is None:
            finished_time = timezone.now()
        try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.test_completed = finished_time
annotator_project.test_score = self.get_annotator_document_score(user, DocumentType.TEST)
annotator_test_score_proportion = annotator_project.test_score / self.num_test_documents if self.num_test_documents > 0 else 0
if self.can_annotate_after_passing_training_and_test and \
annotator_test_score_proportion >= self.min_test_pass_threshold:
annotator_project.allowed_to_annotate = True
annotator_project.save()
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
    def annotator_set_allowed_to_annotate(self, user, finished_time=None):
        try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.allowed_to_annotate = True
annotator_project.save()
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
    def reject_annotator(self, user, finished_time=None):
        if finished_time is None:
            finished_time = timezone.now()
        try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.annotations_completed = finished_time
annotator_project.status = AnnotatorProject.COMPLETED
annotator_project.rejected = True
annotator_project.save()
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
    def remove_annotator(self, user, finished_time=None):
        if finished_time is None:
            finished_time = timezone.now()
        try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.annotations_completed = finished_time
annotator_project.status = AnnotatorProject.COMPLETED
annotator_project.save()
Annotation.clear_all_pending_user_annotations(user)
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
def num_annotator_task_remaining(self, user):
num_annotable = self.get_annotator_annotatable_documents_query(user).count()
num_completed_by_user = self.get_annotator_completed_documents_query(user).count()
max_num_docs_user_can_annotate = self.max_num_task_per_annotator
remaining_docs_in_quota = max_num_docs_user_can_annotate - num_completed_by_user
if remaining_docs_in_quota < num_annotable:
return remaining_docs_in_quota
else:
return num_annotable
def get_annotator_annotatable_documents_query(self, user, doc_type=DocumentType.ANNOTATION):
# Filter to get the count of occupied annotations in the document
# (annotations with COMPLETED and PENDING status)
occupied_filter = (Q(annotations__status=Annotation.COMPLETED) |
Q(annotations__status=Annotation.PENDING))
occupied_count = Count('annotations', filter=occupied_filter)
# Filter to get the count of user occupied annotation in the document
# (annotations with COMPLETED, PENDING, and REJECTED status)
user_occupied_filter = (Q(annotations__user_id=user.pk, annotations__status=Annotation.COMPLETED) |
Q(annotations__user_id=user.pk, annotations__status=Annotation.PENDING) |
Q(annotations__user_id=user.pk, annotations__status=Annotation.REJECTED))
user_occupied_count = Count('annotations', filter=user_occupied_filter)
# All remaining documents that user can annotate
annotatable_docs = Document.objects.filter(project_id=self.pk, doc_type=doc_type) \
.annotate(num_occupied=occupied_count) \
.annotate(num_user_occupied=user_occupied_count) \
.filter(num_user_occupied__lt=1)
if doc_type == DocumentType.ANNOTATION:
# Enforce the max number of annotations per document for ANNOTATION docs only (not
# for TRAINING or TEST, which can be annotated by everyone)
annotatable_docs = annotatable_docs.filter(num_occupied__lt=self.annotations_per_doc)
return annotatable_docs
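    # Worked example for the query above, assuming annotations_per_doc = 3: a document
    # with two COMPLETED and one PENDING annotation (num_occupied = 3) is excluded for
    # everyone, and a document the user has already COMPLETED, left PENDING, or REJECTED
    # (num_user_occupied >= 1) is excluded for that user even if free slots remain.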
def get_annotator_occupied_documents_query(self, user, doc_type=DocumentType.ANNOTATION):
# Filter to get the count of user occupied annotation in the document
# (annotations with COMPLETED, PENDING, and REJECTED status)
user_occupied_filter = (Q(annotations__user_id=user.pk, annotations__status=Annotation.COMPLETED) |
Q(annotations__user_id=user.pk, annotations__status=Annotation.PENDING) |
Q(annotations__user_id=user.pk, annotations__status=Annotation.REJECTED))
user_occupied_count = Count('annotations', filter=user_occupied_filter)
# Number of user annotated docs in the project
occupied_docs = Document.objects.filter(project_id=self.pk, doc_type=doc_type) \
.annotate(num_user_occupied=user_occupied_count) \
.filter(num_user_occupied__gt=0)
return occupied_docs
def get_annotator_completed_documents_query(self, user, doc_type=DocumentType.ANNOTATION):
        # Filter to get the count of the user's COMPLETED annotations in the document
completed_filter = (Q(annotations__user_id=user.pk, annotations__status=Annotation.COMPLETED))
completed_count = Count('annotations', filter=completed_filter)
# Number of user completed annotated docs in the project
completed_docs = Document.objects.filter(project_id=self.pk, doc_type=doc_type) \
.annotate(num_user_occupied=completed_count) \
.filter(num_user_occupied__gt=0)
return completed_docs
def get_annotator_pending_documents_query(self, user, doc_type=DocumentType.ANNOTATION):
        # Filter to get the count of the user's PENDING annotations in the document
        pending_filter = (Q(annotations__user_id=user.pk, annotations__status=Annotation.PENDING))
        pending_count = Count('annotations', filter=pending_filter)
        # Number of documents in the project with a pending annotation from the user
pending_docs = Document.objects.filter(project_id=self.pk, doc_type=doc_type) \
.annotate(num_user_occupied=pending_count) \
.filter(num_user_occupied__gt=0)
return pending_docs
def get_annotator_task(self, user):
"""
Gets or creates a new annotation task for user (annotator).
:returns: Dictionary with all information to complete an annotation task. Only project information
                  is returned if the user is waiting to be approved as an annotator. Returns None and removes
                  the user from the annotator list if there are no more tasks or the user has reached their quota.
"""
annotation = self.get_current_annotator_task(user)
if annotation:
# User has existing task
return self.get_annotation_task_dict(annotation)
else:
# Tries to generate new task if there's no existing task
if self.annotator_reached_quota(user):
self.remove_annotator(user)
                return None  # Also return None as we've completed all the tasks
else:
return self.decide_annotator_task_type_and_assign(user)
def annotator_reached_quota(self, user):
num_user_annotated_docs = (self.get_annotator_completed_documents_query(user) |
self.get_annotator_pending_documents_query(user)).count()
return num_user_annotated_docs >= self.max_num_task_per_annotator
def get_current_annotator_task(self, user):
"""
Gets annotator's current pending task in the project.
"""
current_annotations = user.annotations.filter(status=Annotation.PENDING)
num_annotations = current_annotations.count()
if num_annotations > 1:
raise RuntimeError("Working on more than one annotation at a time! Should not be possible!")
if num_annotations <= 0:
return None
annotation = current_annotations.first()
if annotation.document.project != self:
            raise RuntimeError(
"The annotation doesn't belong to this project! Annotator should only work on one project at a time")
return annotation
def get_annotation_task_dict(self, annotation, include_task_history_in_project=True):
"""
        Returns a dictionary with all information required for rendering an annotation task
        annotation:Annotation - The annotation to create an annotation task dictionary for
        include_task_history_in_project:bool - Also include a list of annotation ids for this user in the project
"""
document = annotation.document
output = {
**self.get_annotation_task_project_dict(),
"document_id": document.pk,
"document_field_id": get_value_from_key_path(document.data, self.document_id_field),
"document_data": document.data,
"document_type": document.doc_type_str,
"annotation_id": annotation.pk,
"annotation_data": annotation.data,
"allow_document_reject": self.allow_document_reject,
"annotation_timeout": annotation.times_out_at,
"annotator_remaining_tasks": self.num_annotator_task_remaining(user=annotation.user),
"annotator_completed_tasks": self.get_annotator_completed_documents_query(user=annotation.user).count(),
"annotator_completed_training_tasks": self.get_annotator_completed_documents_query(user=annotation.user,
doc_type=DocumentType.TRAINING).count(),
"annotator_completed_test_tasks": self.get_annotator_completed_documents_query(user=annotation.user,
doc_type=DocumentType.TEST).count(),
"document_gold_standard_field": self.document_gold_standard_field,
"document_pre_annotation_field": self.document_pre_annotation_field,
}
        if include_task_history_in_project and document.doc_type == DocumentType.ANNOTATION:
# If specified, also returns a list of annotation ids for this user in the project
output["task_history"] = [annotation.pk for annotation in
Annotation.get_annotations_for_user_in_project(annotation.user.pk, self.pk)]
return output
def get_annotation_task_project_dict(self):
return {
"project_name": self.name,
"project_description": self.description,
"project_annotator_guideline": self.annotator_guideline,
"project_config": self.configuration,
"project_id": self.pk,
}
def decide_annotator_task_type_and_assign(self, user):
"""
        Decides which type of task (training, test or annotation) the user should receive
        next, based on their progress through the project's stages, and assigns it.
"""
# Check annotator's current status in the project
annotator_proj = AnnotatorProject.objects.get(annotator=user, project=self)
if not annotator_proj.allowed_to_annotate:
# Check whether annotator is in test or training
if self.has_training_stage and not annotator_proj.training_completed:
# Check whether the annotator's completed all training tasks, mark complete if so
if self.get_annotator_annotatable_documents_query(user, doc_type=DocumentType.TRAINING).count() == 0:
self.annotator_completed_training(user)
if self.has_test_stage and not annotator_proj.test_completed:
# Check whether annotator's completed all test tasks, mark complete if so
if self.get_annotator_annotatable_documents_query(user, doc_type=DocumentType.TEST).count() == 0:
self.annotator_completed_test(user)
# Refresh object to ensure the phase changes are picked up
annotator_proj.refresh_from_db()
# Assign task
if annotator_proj.allowed_to_annotate:
# If allowed to annotate then skip over testing and training stage
annotation = self.assign_annotator_task(user)
if annotation:
return self.get_annotation_task_dict(annotation)
else:
# Remove annotator from project if there's no more tasks
annotator_proj.annotations_completed = timezone.now()
annotator_proj.save()
self.remove_annotator(user)
return None
elif self.has_training_stage and not annotator_proj.training_completed:
# Tries to assign training task
return self.get_annotation_task_dict(self.assign_annotator_task(user, DocumentType.TRAINING))
elif self.has_test_stage and not annotator_proj.test_completed:
# Tries to assign test task
return self.get_annotation_task_dict(self.assign_annotator_task(user, DocumentType.TEST))
else:
return self.get_annotation_task_project_dict()
def assign_annotator_task(self, user, doc_type=DocumentType.ANNOTATION):
"""
Assigns an annotation task to the annotator, works for testing, training and annotation tasks.
Annotation task performs an extra check for remaining annotation task (num_annotation_tasks_remaining),
testing and training does not do this check as the annotator must annotate all documents.
"""
        if doc_type in (DocumentType.TEST, DocumentType.TRAINING) or \
                (doc_type == DocumentType.ANNOTATION and self.num_annotation_tasks_remaining > 0):
for doc in self.documents.filter(doc_type=doc_type).order_by('?'):
# Check that annotator hasn't annotated and that
# doc hasn't been fully annotated
if doc.user_can_annotate_document(user):
# Returns a new annotation (task) if so
return Annotation.objects.create(user=user,
document=doc,
times_out_at=timezone.now() + timedelta(
minutes=self.annotation_timeout))
return None
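    # Sketch of how task assignment flows (illustrative, assuming `user` is an
    # approved annotator on `project`):
    #
    #   annotation = project.assign_annotator_task(user)  # annotation-stage doc
    #   training = project.assign_annotator_task(user, DocumentType.TRAINING)
    #
    # Each call creates a new pending Annotation whose times_out_at deadline is
    # now + project.annotation_timeout minutes, or returns None if nothing is
    # available.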
def check_project_complete(self):
"""
        Checks whether all annotations have been completed and, if so, releases all annotators
        from the project. If complete, also sends telemetry data.
"""
if self.is_completed:
for annotator in self.annotators.all():
self.remove_annotator(annotator)
if settings.TELEMETRY_ON:
self.send_telemetry(status="complete")
def send_telemetry(self, status: str):
"""
Sends telemetry data for the project depending on the status.
"""
if settings.TELEMETRY_ON:
ts = TelemetrySender(status=status, data=self.get_telemetry_stats())
ts.send()
else:
log.info(f"Telemetry is switched off. Not sending telemetry data for project {self.pk}.")
def get_annotators_dict(self):
return {
"annotators": [{"id": ann.id, "username": ann.username, "email": ann.email} for ann in
self.annotators.filter(annotatorproject__status=AnnotatorProject.ACTIVE).all()]
}
def get_project_stats(self):
return {
"owned_by": self.owner.username,
"documents": self.num_documents,
"training_documents": self.num_training_documents,
"test_documents": self.num_test_documents,
"completed_tasks": self.num_completed_tasks,
"pending_tasks": self.num_pending_tasks,
"rejected_tasks": self.num_rejected_tasks,
"timed_out_tasks": self.num_timed_out_tasks,
"aborted_tasks": self.num_aborted_tasks,
"total_tasks": self.num_annotation_tasks_total,
"is_configured": self.is_project_configured,
"configuration_error": None if self.is_project_configured else self.project_configuration_error_message,
"is_completed": self.is_completed,
"num_annotators": self.num_annotators,
}
def get_telemetry_stats(self) -> dict:
"""
Returns a dict of stats specifically for telemetry including no identifying information.
"""
return {
"uuid": str(self.uuid),
"documents": self.num_documents,
"training_documents": self.num_training_documents,
"test_documents": self.num_test_documents,
"completed_tasks": self.num_completed_tasks,
"pending_tasks": self.num_pending_tasks,
"rejected_tasks": self.num_rejected_tasks,
"timed_out_tasks": self.num_timed_out_tasks,
"aborted_tasks": self.num_aborted_tasks,
"total_tasks": self.num_annotation_tasks_total,
"is_configured": self.is_project_configured,
"is_completed": self.is_completed,
"num_annotators": self.num_all_annotators,
}
class AnnotatorProject(models.Model):
"""
Intermediate class to represent annotator-project relationship
"""
ACTIVE = 0
COMPLETED = 1
STATUS = (
(ACTIVE, 'Active'),
(COMPLETED, 'Completed')
)
annotator = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, null=True, blank=True)
project = models.ForeignKey(Project, on_delete=models.SET_NULL, null=True, blank=True)
training_score = models.FloatField(default=0)
test_score = models.FloatField(default=0)
training_completed = models.DateTimeField(null=True)
test_completed = models.DateTimeField(null=True)
annotations_completed = models.DateTimeField(null=True)
allowed_to_annotate = models.BooleanField(default=False)
status = models.IntegerField(choices=STATUS, default=ACTIVE)
rejected = models.BooleanField(default=False)
@property
def num_annotations(self):
"""Number of annotations completed by this annotator in this project"""
count = 0
for d in self.project.documents.filter(doc_type=DocumentType.ANNOTATION):
count += d.annotations.filter(user=self.annotator).count()
return count
def set_status(self, status):
self.status = status
self.save()
def get_stats(self):
return {
"annotations": self.num_annotations,
}
class Document(models.Model):
"""
Model to represent a document.
"""
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name="documents")
data = models.JSONField(default=dict)
created = models.DateTimeField(default=timezone.now)
doc_type = models.IntegerField(choices=DocumentType.DOCUMENT_TYPE, default=DocumentType.ANNOTATION)
@property
def num_completed_annotations(self):
return self.annotations.filter(status=Annotation.COMPLETED).count()
@property
def num_rejected_annotations(self):
return self.annotations.filter(status=Annotation.REJECTED).count()
@property
def num_timed_out_annotations(self):
return self.annotations.filter(status=Annotation.TIMED_OUT).count()
@property
def num_pending_annotations(self):
return self.annotations.filter(status=Annotation.PENDING).count()
@property
def num_aborted_annotations(self):
return self.annotations.filter(status=Annotation.ABORTED).count()
@property
def num_completed_and_pending_annotations(self):
return self.annotations.filter(
Q(status=Annotation.COMPLETED) | Q(status=Annotation.PENDING)).count()
@property
def doc_type_str(self):
        if self.doc_type == DocumentType.ANNOTATION:
            return "Annotation"
        elif self.doc_type == DocumentType.TRAINING:
            return "Training"
        elif self.doc_type == DocumentType.TEST:
            return "Test"
else:
raise Exception("Unknown document type")
def user_can_annotate_document(self, user):
""" User must not have completed, pending or rejected the document,
and if the document is not a training or test document then it must
not already be fully annotated."""
num_user_annotation_in_doc = self.annotations.filter(
Q(user_id=user.pk, status=Annotation.COMPLETED) |
Q(user_id=user.pk, status=Annotation.PENDING) |
Q(user_id=user.pk, status=Annotation.REJECTED)).count()
if num_user_annotation_in_doc > 1:
raise RuntimeError(
f"The user {user.username} has more than one annotation ({num_user_annotation_in_doc}) in the document.")
return num_user_annotation_in_doc < 1 and (
self.doc_type in (DocumentType.TRAINING, DocumentType.TEST) or
self.num_completed_and_pending_annotations < self.project.annotations_per_doc
)
def num_user_completed_annotations(self, user):
return self.annotations.filter(user_id=user.pk, status=Annotation.COMPLETED).count()
def num_user_pending_annotations(self, user):
return self.annotations.filter(user_id=user.pk, status=Annotation.PENDING).count()
def num_user_rejected_annotations(self, user):
return self.annotations.filter(user_id=user.pk, status=Annotation.REJECTED).count()
def num_user_timed_out_annotations(self, user):
return self.annotations.filter(user_id=user.pk, status=Annotation.TIMED_OUT).count()
def num_user_aborted_annotations(self, user):
return self.annotations.filter(user_id=user.pk, status=Annotation.ABORTED).count()
def user_completed_annotation_of_document(self, user):
return self.num_user_completed_annotations(user) > 0
    def get_listing(self, annotation_list=None):
        """
        Get a dictionary representation of document for rendering
        """
        if annotation_list is None:
            annotation_list = []
doc_out = {
"id": self.pk,
"annotations": annotation_list,
"created": self.created,
"completed": self.num_completed_annotations,
"rejected": self.num_rejected_annotations,
"timed_out": self.num_timed_out_annotations,
"pending": self.num_pending_annotations,
"aborted": self.num_aborted_annotations,
"doc_id": get_value_from_key_path(self.data, self.project.document_id_field),
"project_id": self.project.id,
"data": self.data,
"doc_type": self.doc_type_str,
}
return doc_out
def get_doc_annotation_dict(self, json_format="raw", anonymize=True):
"""
Get dictionary of document and its annotations for export
"""
# Create dictionary for document
doc_dict = None
if json_format == "raw" or json_format == "csv":
doc_dict = self.data
elif json_format == "gate":
ignore_keys = {"text", self.project.document_id_field}
features_dict = {key: value for key, value in self.data.items() if key not in ignore_keys}
doc_dict = {
"text": self.data["text"],
"features": features_dict,
"offset_type": "p",
"name": get_value_from_key_path(self.data, self.project.document_id_field)
}
# Insert annotation sets into the doc dict
annotations = self.annotations.filter(status=Annotation.COMPLETED)
if json_format == "csv":
# Format annotations for CSV export
annotation_sets = {}
for annotation in annotations:
a_data = annotation.data
annotation_dict = {}
# Format for csv, flatten list values
for a_key, a_value in a_data.items():
if isinstance(a_value, list):
annotation_dict[a_key] = ",".join(a_value)
else:
annotation_dict[a_key] = a_value
annotation_dict["duration_seconds"] = annotation.time_to_complete
if anonymize:
annotation_sets[str(annotation.user.id)] = annotation_dict
else:
annotation_sets[annotation.user.username] = annotation_dict
doc_dict["annotations"] = annotation_sets
else:
# Format for JSON in line with GATE formatting
annotation_sets = {}
for annotation in annotations:
a_data = annotation.data
annotation_set = {
"name": annotation.user.id if anonymize else annotation.user.username,
"annotations": [
{
"type": "Document",
"start": 0,
"end": 0,
"id": 0,
"duration_seconds": annotation.time_to_complete,
"features": {
"label": a_data
}
}
],
"next_annid": 1,
}
                annotation_sets[str(annotation.user.id) if anonymize else annotation.user.username] = annotation_set
doc_dict["annotation_sets"] = annotation_sets
return doc_dict
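    # For json_format="gate", get_doc_annotation_dict produces a structure along
    # these lines (an illustrative sketch with made-up values):
    #
    #   {
    #       "text": "...", "features": {...}, "offset_type": "p", "name": "doc-1",
    #       "annotation_sets": {
    #           "<user id or username>": {
    #               "name": "<user id or username>",
    #               "annotations": [{"type": "Document", "start": 0, "end": 0,
    #                                "id": 0, "duration_seconds": 12.3,
    #                                "features": {"label": {...}}}],
    #               "next_annid": 1,
    #           }
    #       }
    #   }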
class Annotation(models.Model):
"""
Model to represent a single annotation.
"""
PENDING = 0
COMPLETED = 1
REJECTED = 2
TIMED_OUT = 3
ABORTED = 4
ANNOTATION_STATUS = (
(PENDING, 'Pending'),
(COMPLETED, 'Completed'),
(REJECTED, 'Rejected'),
(TIMED_OUT, 'Timed out'),
(ABORTED, 'Aborted')
)
user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name="annotations", null=True)
document = models.ForeignKey(Document, on_delete=models.CASCADE, related_name="annotations")
_data = models.JSONField(default=dict)
@property
def data(self):
ann_history = self.latest_annotation_history()
if ann_history:
return ann_history.data
return None
@data.setter
def data(self, value):
        # Writing to .data appends a new entry to the annotation's change history instead of mutating _data
self._append_annotation_history(value)
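    # Note (illustrative sketch, not in the original source): assigning to .data is
    # append-only, e.g.
    #
    #   annotation.data = {"sentiment": "positive"}   # creates history entry 1
    #   annotation.data = {"sentiment": "negative"}   # creates history entry 2
    #   annotation.data                               # -> {"sentiment": "negative"}
    #
    # so earlier submissions remain recoverable via change_history.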
times_out_at = models.DateTimeField(default=None, null=True)
created = models.DateTimeField(default=timezone.now)
status = models.IntegerField(choices=ANNOTATION_STATUS, default=PENDING)
status_time = models.DateTimeField(default=None, null=True)
time_to_complete = models.FloatField(default=None, null=True)
    def _set_new_status(self, status, time=None):
        self.ensure_status_pending()
        self.status = status
        self.status_time = time if time is not None else timezone.now()
    def complete_annotation(self, data, elapsed_time=None, time=None):
self.data = data
self._set_new_status(Annotation.COMPLETED, time)
self.time_to_complete = elapsed_time
self.save()
# Also check whether the project has been completed
self.document.project.check_project_complete()
    def reject_annotation(self, time=None):
self._set_new_status(Annotation.REJECTED, time)
self.save()
    def timeout_annotation(self, time=None):
self._set_new_status(Annotation.TIMED_OUT, time)
self.save()
    def abort_annotation(self, time=None):
self._set_new_status(Annotation.ABORTED, time)
self.save()
def ensure_status_pending(self):
if self.status == Annotation.PENDING and self.status_time is None:
# Ok if still pending and doesn't have status time
return
if self.status == Annotation.COMPLETED:
log.warning(f"Annotation id {self.id} is already completed.")
raise RuntimeError("The annotation is already completed.")
if self.status == Annotation.REJECTED:
log.warning(f"Annotation id {self.id} is already rejected.")
raise RuntimeError("The annotation is already rejected.")
if self.status == Annotation.TIMED_OUT:
log.warning(f"Annotation id {self.id} is already timed out.")
raise RuntimeError("The annotation is already timed out.")
if self.status == Annotation.ABORTED:
log.warning(f"Annotation id {self.id} is already aborted.")
raise RuntimeError("The annotation is already timed out.")
def user_allowed_to_annotate(self, user):
return self.user.id == user.id
    def change_annotation(self, data, by_user=None, time=None):
if self.status != Annotation.COMPLETED:
raise RuntimeError("The annotation must be completed before it can be changed")
self._append_annotation_history(data, by_user, time)
    def _append_annotation_history(self, data, by_user=None, time=None):
        if by_user is None:
            by_user = self.user
        if time is None:
            time = timezone.now()
AnnotationChangeHistory.objects.create(data=data,
time=time,
annotation=self,
changed_by=by_user)
def latest_annotation_history(self):
"""
Convenience function for getting the latest annotation data from the change history.
        Returns None if there are no entries.
        """
        # QuerySet.last() returns None on an empty queryset, so no exception
        # handling is needed here
        return self.change_history.last()
def get_listing(self):
"""
Get a dictionary representation of the annotation for rendering.
"""
output = {
"id": self.pk,
"annotated_by": self.user.username,
"created": self.created,
"completed": self.status_time if self.status == Annotation.COMPLETED else None,
"rejected": self.status_time if self.status == Annotation.REJECTED else None,
"timed_out": self.status_time if self.status == Annotation.TIMED_OUT else None,
"aborted": self.status_time if self.status == Annotation.ABORTED else None,
"times_out_at": self.times_out_at,
"change_list": [change_history.get_listing() for change_history in self.change_history.all()],
}
return output
@staticmethod
    def check_for_timed_out_annotations(current_time=None):
        """
        Checks for any annotation that has timed out (times_out_at < current_time) and sets its
        status to timed out at the current_time.
        Returns the number of annotations that have timed out.
        """
        if current_time is None:
            current_time = timezone.now()
timed_out_annotations = Annotation.objects.filter(times_out_at__lt=current_time, status=Annotation.PENDING)
for annotation in timed_out_annotations:
annotation.timeout_annotation(current_time)
return len(timed_out_annotations)
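    # Illustrative sketch (not in the original source): this check is the kind of
    # thing a periodic job would drive, e.g.
    #
    #   from backend.models import Annotation
    #   num_expired = Annotation.check_for_timed_out_annotations()
    #
    # How it is scheduled (cron, celery beat, etc.) is an assumption here.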
@staticmethod
def clear_all_pending_user_annotations(user):
pending_annotations = Annotation.objects.filter(user_id=user.pk, status=Annotation.PENDING)
if pending_annotations.count() > 1:
raise RuntimeError("More than one pending annotation has been created for the user")
for annotation in pending_annotations:
annotation.abort_annotation()
@staticmethod
def get_annotations_for_user_in_project(user_id, project_id, doc_type=DocumentType.ANNOTATION):
"""
        Gets a list of all completed and pending annotation tasks in the project with id project_id
        that belong to the annotator with the given user_id.
Ordered by descending date and PK so the most recent entry is placed first.
"""
return Annotation.objects.filter(document__project_id=project_id,
document__doc_type=doc_type,
user_id=user_id).distinct().filter(
Q(status=Annotation.COMPLETED) | Q(status=Annotation.PENDING)).order_by("-created", "-pk")
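    # Example (illustrative): fetching a user's task history for display, with the
    # most recent annotation first:
    #
    #   history = Annotation.get_annotations_for_user_in_project(user.pk, project.pk)
    #   latest = history.first()  # most recent completed/pending annotation, or None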
class AnnotationChangeHistory(models.Model):
"""
Model to store the changes in annotation when an annotator makes a change after initial submission
"""
data = models.JSONField(default=dict)
time = models.DateTimeField(default=timezone.now)
annotation = models.ForeignKey(Annotation, on_delete=models.CASCADE, related_name="change_history", null=False)
changed_by = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, related_name="changed_annotations",
null=True)
def get_listing(self):
return {
"id": self.pk,
"data": self.data,
"time": self.time,
"changed_by": self.changed_by.username,
}
| 50,946 | 40.931687 | 138 | py |
gate-teamware | gate-teamware-master/backend/apps.py | from django.apps import AppConfig
import logging
log = logging.getLogger(__name__)
class BackendConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'backend'
def ready(self):
# This needs to be imported in order to
# pick up all the registered rpc methods
import backend.rpc
| 343 | 20.5 | 56 | py |
gate-teamware | gate-teamware-master/backend/__init__.py | 0 | 0 | 0 | py |
|
gate-teamware | gate-teamware-master/backend/migrations/0029_serviceuser_agreed_privacy_policy.py | # Generated by Django 3.2.15 on 2023-02-22 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0028_project_uuid'),
]
operations = [
migrations.AddField(
model_name='serviceuser',
name='agreed_privacy_policy',
field=models.BooleanField(default=False),
),
]
| 402 | 20.210526 | 53 | py |
gate-teamware | gate-teamware-master/backend/migrations/0021_rename_can_annotate_after_passing_test_project_can_annotate_after_passing_training_and_test.py | # Generated by Django 3.2 on 2022-05-18 16:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0020a_training_score_not_null'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='can_annotate_after_passing_test',
new_name='can_annotate_after_passing_training_and_test',
),
]
| 431 | 21.736842 | 68 | py |
gate-teamware | gate-teamware-master/backend/migrations/0015a_remove_annotates_old.py | # Generated by Django 3.2.8 on 2022-03-18 16:20
from django.utils import timezone
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backend', '0015_auto_20220318_1620'),
]
operations = [
migrations.RemoveField(
model_name='serviceuser',
name='annotates_old',
),
]
| 448 | 20.380952 | 47 | py |
gate-teamware | gate-teamware-master/backend/migrations/0028_project_uuid.py | # Generated by Django 3.2.15 on 2023-01-25 11:07
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('backend', '0027_merge_20221110_1556'),
]
operations = [
migrations.AddField(
model_name='project',
name='uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False),
),
]
| 418 | 19.95 | 71 | py |
gate-teamware | gate-teamware-master/backend/migrations/0025_project_document_pre_annotation_field.py | # Generated by Django 3.2.15 on 2022-11-01 15:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0024_rename_data_annotation__data'),
]
operations = [
migrations.AddField(
model_name='project',
name='document_pre_annotation_field',
field=models.TextField(default=''),
),
]
| 416 | 20.947368 | 57 | py |
gate-teamware | gate-teamware-master/backend/migrations/0007_auto_20211026_0107.py | # Generated by Django 3.2 on 2021-10-26 01:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0006_auto_20211020_2201'),
]
operations = [
migrations.AddField(
model_name='project',
name='annotator_guideline',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='project',
name='document_input_preview',
field=models.JSONField(default={}),
),
]
| 558 | 22.291667 | 47 | py |
gate-teamware | gate-teamware-master/backend/migrations/0023_annotationchangehistory.py | # Generated by Django 3.2 on 2022-07-01 15:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('backend', '0022_project_allow_annotation_change'),
]
operations = [
migrations.CreateModel(
name='AnnotationChangeHistory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', models.JSONField(default=dict)),
('time', models.DateTimeField(default=django.utils.timezone.now)),
('annotation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='change_history', to='backend.annotation')),
('changed_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='changed_annotations', to=settings.AUTH_USER_MODEL)),
],
),
]
| 1,046 | 37.777778 | 172 | py |
gate-teamware | gate-teamware-master/backend/migrations/0018_project_document_gold_standard_field.py | # Generated by Django 3.2 on 2022-03-25 18:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0017_document_doc_type'),
]
operations = [
migrations.AddField(
model_name='project',
name='document_gold_standard_field',
field=models.TextField(default='gold'),
),
]
| 405 | 20.368421 | 51 | py |
gate-teamware | gate-teamware-master/backend/migrations/0002_auto_20210726_1312.py | # Generated by Django 3.2 on 2021-07-26 13:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='serviceuser',
name='activate_account_token',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='serviceuser',
name='activate_account_token_expire',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='serviceuser',
name='is_activated',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='serviceuser',
name='reset_password_token',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='serviceuser',
name='reset_password_token_expire',
field=models.DateTimeField(null=True),
),
]
| 1,075 | 26.589744 | 53 | py |
gate-teamware | gate-teamware-master/backend/migrations/0030_serviceuser_is_deleted.py | # Generated by Django 3.2.15 on 2023-03-16 16:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0029_serviceuser_agreed_privacy_policy'),
]
operations = [
migrations.AddField(
model_name='serviceuser',
name='is_deleted',
field=models.BooleanField(default=False),
),
]
| 412 | 20.736842 | 62 | py |
gate-teamware | gate-teamware-master/backend/migrations/0024_rename_data_annotation__data.py | # Generated by Django 3.2 on 2022-07-15 14:27
from django.utils import timezone
from django.db import migrations, models
def _migrate_annotation_data(apps, schema_editor):
Annotation = apps.get_model("backend", "Annotation")
AnnotationChangeHistory = apps.get_model("backend", "AnnotationChangeHistory")
for annotation in Annotation.objects.all():
if annotation._data:
            # This migration changes annotation's .data field to ._data so that .data can be used for writing
            # to the change history instead; we then need to copy the contents of ._data into the change
            # history, as ._data will be removed in the next migration.
AnnotationChangeHistory.objects.create(data=annotation._data,
annotation_id=annotation.pk,
time=annotation.status_time if annotation.status_time else timezone.now(),
changed_by=annotation.user)
class Migration(migrations.Migration):
dependencies = [
('backend', '0023_annotationchangehistory'),
]
operations = [
migrations.RenameField(
model_name='annotation',
old_name='data',
new_name='_data',
),
migrations.RunPython(_migrate_annotation_data, reverse_code=migrations.RunPython.noop),
]
| 1,413 | 39.4 | 125 | py |
gate-teamware | gate-teamware-master/backend/migrations/0014_annotation_time_to_complete.py | # Generated by Django 3.2.8 on 2022-02-04 13:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0013_project_allow_document_reject'),
]
operations = [
migrations.AddField(
model_name='annotation',
name='time_to_complete',
field=models.FloatField(default=None, null=True),
),
]
| 420 | 21.157895 | 61 | py |
gate-teamware | gate-teamware-master/backend/migrations/0019_annotatorproject_allowed_to_annotate.py | # Generated by Django 3.2 on 2022-03-30 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0018_project_document_gold_standard_field'),
]
operations = [
migrations.AddField(
model_name='annotatorproject',
name='allowed_to_annotate',
field=models.BooleanField(default=False),
),
]
| 426 | 21.473684 | 65 | py |
gate-teamware | gate-teamware-master/backend/migrations/0014a_change_user_annotates_to_old.py | # Generated by Django 3.2.8 on 2022-03-18 16:20
from django.utils import timezone
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backend', '0014_annotation_time_to_complete'),
]
operations = [
migrations.RenameField(
model_name='serviceuser',
old_name='annotates',
new_name='annotates_old',
),
]
| 495 | 21.545455 | 56 | py |
gate-teamware | gate-teamware-master/backend/migrations/0005_serviceuser_receive_mail_notifications.py | # Generated by Django 3.2 on 2021-07-29 14:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0004_merge_20210727_1522'),
]
operations = [
migrations.AddField(
model_name='serviceuser',
name='receive_mail_notifications',
field=models.BooleanField(default=True),
),
]
| 410 | 20.631579 | 52 | py |
gate-teamware | gate-teamware-master/backend/migrations/0010_alter_project_document_input_preview.py | # Generated by Django 3.2 on 2021-10-26 01:21
import backend.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0009_alter_project_document_input_preview'),
]
operations = [
migrations.AlterField(
model_name='project',
name='document_input_preview',
field=models.JSONField(default=backend.models.default_document_input_preview),
),
]
| 481 | 23.1 | 90 | py |
gate-teamware | gate-teamware-master/backend/migrations/0009_alter_project_document_input_preview.py | # Generated by Django 3.2 on 2021-10-26 01:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0008_alter_project_document_input_preview'),
]
operations = [
migrations.AlterField(
model_name='project',
name='document_input_preview',
field=models.JSONField(default={'text': '<p>Some html text <strong>in bold</strong>.</p><p>Paragraph 2.</p>'}),
),
]
| 492 | 24.947368 | 123 | py |
gate-teamware | gate-teamware-master/backend/migrations/0003_rename_is_activated_serviceuser_is_account_activated.py | # Generated by Django 3.2 on 2021-07-26 15:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0002_auto_20210726_1312'),
]
operations = [
migrations.RenameField(
model_name='serviceuser',
old_name='is_activated',
new_name='is_account_activated',
),
]
| 386 | 19.368421 | 47 | py |
gate-teamware | gate-teamware-master/backend/migrations/0017_document_doc_type.py | # Generated by Django 3.2.8 on 2022-03-23 17:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0016_annotatorproject_rejected'),
]
operations = [
migrations.AddField(
model_name='document',
name='doc_type',
field=models.IntegerField(choices=[(0, 'Annotation'), (1, 'Training'), (2, 'Test')], default=0),
),
]
| 453 | 22.894737 | 108 | py |
gate-teamware | gate-teamware-master/backend/migrations/0026_project_document_input_preview_csv.py | # Generated by Django 3.2.15 on 2022-10-21 14:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0025_serviceuser_doc_format_pref'),
]
operations = [
migrations.AddField(
model_name='project',
name='document_input_preview_csv',
field=models.TextField(default=''),
),
]
| 412 | 20.736842 | 56 | py |
gate-teamware | gate-teamware-master/backend/migrations/0015_auto_20220318_1620.py | # Generated by Django 3.2.8 on 2022-03-18 16:20
from django.utils import timezone
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def _migrate_project_annotators(apps, schema_editor):
User = apps.get_model('backend', 'ServiceUser')
AnnotatorProject = apps.get_model('backend', 'AnnotatorProject')
Project = apps.get_model('backend', 'Project')
for user in User.objects.all():
project = user.annotates_old
# Add user if currently active in project
if project:
AnnotatorProject.objects.create(annotator=user,
project=project)
# Add all user annotated projects that they're not active in
non_active_project_query = Project.objects.filter(documents__annotations__user=user).distinct()
if project:
non_active_project_query = non_active_project_query.exclude(id=project.id)
for anno_proj in non_active_project_query:
AnnotatorProject.objects.create(annotator=user,
project=anno_proj,
status=1,
annotations_completed=timezone.now()
)
class Migration(migrations.Migration):
dependencies = [
('backend', '0014a_change_user_annotates_to_old'),
]
operations = [
migrations.AddField(
model_name='project',
name='can_annotate_after_passing_test',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='project',
name='has_test_stage',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='has_training_stage',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='project',
name='min_test_pass_threshold',
field=models.FloatField(default=1.0, null=True),
),
migrations.CreateModel(
name='AnnotatorProject',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('training_score', models.FloatField(null=True)),
('test_score', models.FloatField(null=True)),
('training_completed', models.DateTimeField(null=True)),
('test_completed', models.DateTimeField(null=True)),
('annotations_completed', models.DateTimeField(null=True)),
('status', models.IntegerField(choices=[(0, 'Active'), (1, 'Completed')], default=0)),
('annotator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='backend.project')),
],
),
migrations.AddField(
model_name='project',
name='annotators',
field=models.ManyToManyField(related_name='annotates', through='backend.AnnotatorProject', to=settings.AUTH_USER_MODEL),
),
migrations.RunPython(_migrate_project_annotators, reverse_code=migrations.RunPython.noop),
]
| 3,468 | 41.82716 | 147 | py |
gate-teamware | gate-teamware-master/backend/migrations/0020_auto_20220330_2021.py | # Generated by Django 3.2 on 2022-03-30 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0019_annotatorproject_allowed_to_annotate'),
]
operations = [
migrations.AlterField(
model_name='annotatorproject',
name='test_score',
field=models.FloatField(default=0),
),
]
| 413 | 20.789474 | 65 | py |
gate-teamware | gate-teamware-master/backend/migrations/0002_auto_20210709_1229.py | # Generated by Django 3.2 on 2021-07-09 12:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='serviceuser',
name='manages',
),
migrations.AddField(
model_name='serviceuser',
name='manager',
field=models.BooleanField(default=False),
),
]
| 489 | 20.304348 | 53 | py |
gate-teamware | gate-teamware-master/backend/migrations/0025_serviceuser_doc_format_pref.py | # Generated by Django 3.2.15 on 2022-09-30 14:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0024_rename_data_annotation__data'),
]
operations = [
migrations.AddField(
model_name='serviceuser',
name='doc_format_pref',
field=models.IntegerField(choices=[(0, 'JSON'), (1, 'CSV')], default=0),
),
]
| 443 | 22.368421 | 84 | py |
gate-teamware | gate-teamware-master/backend/migrations/0016_annotatorproject_rejected.py | # Generated by Django 3.2.8 on 2022-03-23 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0015a_remove_annotates_old'),
]
operations = [
migrations.AddField(
model_name='annotatorproject',
name='rejected',
field=models.BooleanField(default=False),
),
]
| 402 | 20.210526 | 53 | py |
gate-teamware | gate-teamware-master/backend/migrations/0022_project_allow_annotation_change.py | # Generated by Django 3.2 on 2022-06-17 12:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0021_rename_can_annotate_after_passing_test_project_can_annotate_after_passing_training_and_test'),
]
operations = [
migrations.AddField(
model_name='project',
name='allow_annotation_change',
field=models.BooleanField(default=True),
),
]
| 475 | 24.052632 | 120 | py |
gate-teamware | gate-teamware-master/backend/migrations/0004_merge_20210727_1522.py | # Generated by Django 3.2 on 2021-07-27 15:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0003_rename_is_activated_serviceuser_is_account_activated'),
('backend', '0003_rename_manager_serviceuser_is_manager'),
]
operations = [
]
| 324 | 20.666667 | 81 | py |
gate-teamware | gate-teamware-master/backend/migrations/0001_initial.py | # Generated by Django 3.2 on 2021-06-18 21:43
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='ServiceUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('created', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(default='New project')),
('description', models.TextField(default='')),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('configuration', models.JSONField(default=list)),
('annotations_per_doc', models.IntegerField(default=3)),
('annotator_max_annotation', models.FloatField(default=0.6)),
('annotation_timeout', models.IntegerField(default=60)),
('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='owns', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', models.JSONField(default=dict)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='documents', to='backend.project')),
],
),
migrations.CreateModel(
name='Annotation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', models.JSONField(default=dict)),
('times_out_at', models.DateTimeField(default=None, null=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('status', models.IntegerField(choices=[(0, 'Pending'), (1, 'Completed'), (2, 'Rejected'), (3, 'Timed out'), (4, 'Aborted')], default=0)),
('status_time', models.DateTimeField(default=None, null=True)),
('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to='backend.document')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='annotations', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='serviceuser',
name='annotates',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='annotators', to='backend.project'),
),
migrations.AddField(
model_name='serviceuser',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='serviceuser',
name='manages',
field=models.ManyToManyField(related_name='managers', to='backend.Project'),
),
migrations.AddField(
model_name='serviceuser',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
| 6,054 | 58.362745 | 329 | py |
gate-teamware | gate-teamware-master/backend/migrations/0027_merge_20221110_1556.py | # Generated by Django 3.2.15 on 2022-11-10 15:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0025_project_document_pre_annotation_field'),
('backend', '0026_project_document_input_preview_csv'),
]
operations = [
]
| 309 | 19.666667 | 66 | py |
gate-teamware | gate-teamware-master/backend/migrations/__init__.py | 0 | 0 | 0 | py |
|
gate-teamware | gate-teamware-master/backend/migrations/0013_project_allow_document_reject.py | # Generated by Django 3.2.8 on 2021-11-11 17:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0012_rename_id_field_project_document_id_field'),
]
operations = [
migrations.AddField(
model_name='project',
name='allow_document_reject',
field=models.BooleanField(default=True),
),
]
| 425 | 21.421053 | 70 | py |
gate-teamware | gate-teamware-master/backend/migrations/0031_auto_20230316_1713.py | # Generated by Django 3.2.15 on 2023-03-16 17:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backend', '0030_serviceuser_is_deleted'),
]
operations = [
migrations.AlterField(
model_name='annotation',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='project',
name='owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='owns', to=settings.AUTH_USER_MODEL),
),
]
| 803 | 29.923077 | 149 | py |
gate-teamware | gate-teamware-master/backend/migrations/0020a_training_score_not_null.py | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0020_auto_20220330_2021'),
]
operations = [
migrations.AlterField(
model_name='annotatorproject',
name='training_score',
field=models.FloatField(default=0),
),
]
| 353 | 18.666667 | 47 | py |
gate-teamware | gate-teamware-master/backend/migrations/0008_alter_project_document_input_preview.py | # Generated by Django 3.2 on 2021-10-26 01:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0007_auto_20211026_0107'),
]
operations = [
migrations.AlterField(
model_name='project',
name='document_input_preview',
field=models.JSONField(default=dict),
),
]
| 400 | 20.105263 | 49 | py |
gate-teamware | gate-teamware-master/backend/migrations/0006_auto_20211020_2201.py | # Generated by Django 3.2 on 2021-10-20 22:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backend', '0005_serviceuser_receive_mail_notifications'),
]
operations = [
migrations.AlterField(
model_name='serviceuser',
name='activate_account_token',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='serviceuser',
name='activate_account_token_expire',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='serviceuser',
name='annotates',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='annotators', to='backend.project'),
),
migrations.AlterField(
model_name='serviceuser',
name='reset_password_token',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='serviceuser',
name='reset_password_token_expire',
field=models.DateTimeField(blank=True, null=True),
),
]
| 1,295 | 31.4 | 154 | py |
gate-teamware | gate-teamware-master/backend/migrations/0012_rename_id_field_project_document_id_field.py | # Generated by Django 3.2 on 2021-11-05 17:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0011_project_id_field'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='id_field',
new_name='document_id_field',
),
]
| 373 | 18.684211 | 45 | py |
gate-teamware | gate-teamware-master/backend/migrations/0003_rename_manager_serviceuser_is_manager.py | # Generated by Django 3.2 on 2021-07-23 11:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0002_auto_20210709_1229'),
]
operations = [
migrations.RenameField(
model_name='serviceuser',
old_name='manager',
new_name='is_manager',
),
]
| 371 | 18.578947 | 47 | py |
gate-teamware | gate-teamware-master/backend/migrations/0011_project_id_field.py | # Generated by Django 3.2 on 2021-11-05 17:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0010_alter_project_document_input_preview'),
]
operations = [
migrations.AddField(
model_name='project',
name='id_field',
field=models.TextField(default='name'),
),
]
| 404 | 20.315789 | 65 | py |
gate-teamware | gate-teamware-master/backend/management/__init__.py | 0 | 0 | 0 | py |
|
gate-teamware | gate-teamware-master/backend/management/commands/check_create_superuser.py | import sys, os
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "If no superusers in database, create one from credentials supplied in environment variables"
def handle(self, *args, **options):
User = get_user_model()
su_count = User.objects.filter(is_superuser=True).count()
if su_count == 0:
username = os.environ.get("SUPERUSER_USERNAME")
password = os.environ.get("SUPERUSER_PASSWORD")
email = os.environ.get("SUPERUSER_EMAIL")
if not User.objects.filter(username=username).exists():
User.objects.create_superuser(username=username, password=password, email=email,
is_account_activated=True)
self.stdout.write(f'No superusers found in database.\nSuperuser created with username {username}')
else:
self.stdout.write(self.style.ERROR('Username already exists'))
raise CommandError("Username already exists")
else:
self.stdout.write(f'{su_count} Superusers found in database.')
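# Illustrative invocation (assumed; the command name follows the file name, and the
# environment variable names are the ones read above):
#
#   SUPERUSER_USERNAME=admin SUPERUSER_PASSWORD=secret \
#   SUPERUSER_EMAIL=admin@example.com python manage.py check_create_superuser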
| 1,211 | 35.727273 | 114 | py |
gate-teamware | gate-teamware-master/backend/management/commands/__init__.py | 0 | 0 | 0 | py |