repo (string, lengths 2-91) | file (string, lengths 14-211) | code (string, lengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-1.36M) | extension_type (stringclasses, 1 value) |
---|---|---|---|---|---|---|
OCDetector | OCDetector-master/EBDetector/TimeDomainFeatures/Conditional-Entropy-master/old_multi_CE.py | ################################################################################
# #
# Program to calculate the Conditional Entropy for all the files in a
# directory #
# This program calculates and saves a file with file name and its period #
# To run: type in terminal -> python3 multi_CE.py #
# #
# To change the files directory: change the path in the
# "#Folder Location" section #
# To change the results directory: change the path in the
# "#Results Location" section #
# To change the period range: change the values in the #
# "#Creates the period array" section #
# The standard precision is 0.0001 but for some stars we need a finer #
# precision; 0.00001 is usually enough. *precision is the period step! #
# #
################################################################################
import os
import numpy
from periodogram import rephase, get_phase
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def hc(bins, row, col, size): #changed here, original: bins[:,j]
return numpy.sum((lambda p: p*numpy.log(numpy.sum(bins[i,:])/size/p) \
if p > 0 else 0)(bins[i][j] / size)
for i in row for j in col) if size > 0 else numpy.PINF
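# The one-liner above computes the conditional entropy of the phase-folded
# light curve: H(m|phi) = sum_{i,j} p(i,j) * ln( p(i) / p(i,j) ), where
# p(i,j) = bins[i][j]/size is the fraction of observations falling in phase
# bin i and (normalized) magnitude bin j, and p(i) = sum_j p(i,j) is the
# marginal probability of phase bin i. The sketch below is only an
# illustrative, more readable equivalent added for clarity; hc_readable is
# not part of the original script and is never called anywhere below.
def hc_readable(bins, size):
    if size <= 0:
        return numpy.inf
    total = 0.0
    for i in range(bins.shape[0]):
        p_i = numpy.sum(bins[i, :]) / size      # marginal probability of phase bin i
        for j in range(bins.shape[1]):
            p_ij = bins[i][j] / size            # joint probability of cell (i, j)
            if p_ij > 0:
                total += p_ij * numpy.log(p_i / p_ij)
    return total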
#Folder Location
path = '/path/to/star/data'
os.chdir(path)
#Results Location
out = '/output/path/'
files = os.listdir(path)
files.sort()
ent_data = []
ce_period = []
#Loop to calculate entropy for all the files in the directory
for star in files:
data = numpy.ma.array(data=numpy.loadtxt(star), mask=None, dtype=float)
periods = numpy.arange(0.1, 32.0001, 0.0001)
xbins = 10
ybins = 5
row = numpy.linspace(0, xbins-1, xbins)
col = numpy.linspace(0, ybins-1, ybins)
entropy = []
for p in periods:
r = rephase(data, p, 0)
r.T[1] = (r.T[1]-numpy.min(r.T[1]))/(numpy.max(r.T[1])-numpy.min(r.T[1]))
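# The magnitudes (column 1) are min-max normalized to [0, 1] so that they fall
# inside the fixed histogram range [[0, 1], [0, 1]] used below; the phases
# returned by rephase are assumed to already lie in [0, 1).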
bins, binsX, binsY = numpy.histogram2d(r.T[0], r.T[1], [xbins, ybins],[[0,1], [0,1]])
ent = hc(bins, row, col, len(r.T[1]))
#print(star, p, ent)
entropy.append(ent)
name_of_star = star + ".txt"
#name_of_plot = star + ".png"
numpy.savetxt(os.path.join(out, name_of_star),
numpy.dstack((periods, entropy))[0],
fmt='%s')
#Find the position of the minimum entropy to get the corresponding period
minimum = numpy.argmin(entropy)
e_min = entropy[minimum]
p_min = periods[minimum]
line = star, p_min, e_min
ce_per = star, p_min
ent_data.append(line)
ce_period.append(ce_per)
#numpy.savetxt(os.path.join(out, 'CE_RRAB_finner_period_remaining_stars.dat'), ce_period, fmt='%s')
#Print the minimum entropy and the corresponding period
#print('\n', star, p_min, e_min)
#plot the entropy against periods
#fig = plt.figure()
#plt.plot(periods, entropy, 'r')
#plt.plot(periods, entropy, 'k+', markersize=12)
#fig.suptitle(star)
#plt.xlabel('Periods')
#plt.ylabel('Entropy')
#fig.savefig(os.path.join(out,name_of_plot))
numpy.savetxt(os.path.join(out, 'Allfiles_result'), ent_data, fmt='%s')
| 3,259 | 33.680851 | 100 | py |
OCDetector | OCDetector-master/EBDetector/TimeDomainFeatures/Conditional-Entropy-master/old_CE.py | ################################################################################
# #
# Program to calculate the Conditional Entropy for a single pulsating star #
# This program calculates and saves a file with periods and entropies #
# To run: type in terminal -> python3 CE.py #
# #
# To change the file: change the .dat file in the "#Load the data" section #
# To change the period range: change the values in the #
# "#Creates the period array" section #
# The standard precision is 0.0001 but for some stars we need a finer #
# precision; 0.00001 is usually enough. *precision is the period step! #
# #
################################################################################
import numpy
from periodogram import find_period, rephase, get_phase
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def hc(bins, row, col, size):
return numpy.sum((lambda p: p*numpy.log(numpy.sum(bins[i,:])/size/p) \
if p > 0 else 0)(bins[i][j] / size)
for i in row for j in col) if size > 0 else numpy.PINF
#Load the data
data = numpy.ma.array(data=numpy.loadtxt('/path/to/star/file'),
mask=None, dtype=float)
#Creates the period array
periods = numpy.arange(0.1, 1.00001, 0.00001) #period range (p_min, p_max, step)
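# With this range and step, numpy.arange produces roughly (1.0 - 0.1) / 1e-5
# = 90,000 trial periods; the loop below evaluates the entropy once per trial
# period, so the runtime grows linearly with the number of trial periods.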
#Set the number of rows and columns to calculate entropy
xbins = 10 #keep this fix
ybins = 5 #keep this fix
row = numpy.linspace(0, xbins-1, xbins)
col = numpy.linspace(0, ybins-1, ybins)
#For loop to calculate entropy
entropy = []
for p in periods:
r = rephase(data, p, 0)
r.T[1] = (r.T[1]-numpy.min(r.T[1]))/(numpy.max(r.T[1])-numpy.min(r.T[1]))
bins, binsX, binsY = numpy.histogram2d(r.T[0], r.T[1], [xbins, ybins],
[[0,1], [0,1]])
ent = hc(bins, row, col, len(r.T[1]))
#print(p, ent)
entropy.append(ent)
#Save the period and entropy into a file
#numpy.savetxt('period_entropy.dat',
# numpy.dstack((periods, entropy))[0],
# fmt='%s')
#Find the position of the minimum entropy to get the corresponding period
minimum = numpy.argmin(entropy)
e_min = entropy[minimum]
p_min = periods[minimum]
print(p_min)
#Print the minimum entropy and the corresponding period
#print('\n', p_min, e_min)
'''
#plot the entropy against periods
fig = plt.figure()
plt.plot(periods, entropy, 'r')
plt.plot(periods, entropy, 'k+', markersize=12)
fig.suptitle('OGLE-LMC-CEP-0010')
plt.xlabel('Periods')
plt.ylabel('Entropy')
#fig.savefig('0010_test11.png')
'''
| 2,699 | 35.486486 | 81 | py |
HaRT | HaRT-main/run_hulm_hart.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning HaRT for human language modeling.
"""
import logging
import math
import os
import sys
from args.clm_args import DataTrainingArguments, ModelArguments
from src.model.hart import HaRTPreTrainedModel
from src.model.modeling_hart import HaRTBaseLMHeadModel
from src.model.configuration_hart import HaRTConfig
from data.utils_hart.hulm_data_utils import load_dataset
from data.data_collator import DataCollatorWithPaddingForHaRT
import transformers
from transformers import (
CONFIG_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
TrainerCallback,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
class evalLogsCallback(TrainerCallback):
def on_evaluate(self, args, state, control, **kwargs):
if control.should_save:
metrics = kwargs['metrics']
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
self.save_metrics('eval_{}'.format(metrics['epoch']), metrics, args)
def save_metrics(self, split, metrics, args):
import json
path = os.path.join(args.output_dir, f"{split}_results.json")
with open(path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.instantiate_hart:
config = HaRTConfig()
elif model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.add_history:
config.add_history = True
if model_args.use_qh05_wts:
config.use_qh05_wts = True
else:
config.use_qh05_wts = False
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.instantiate_hart:
def add_insep_token(tokenizer):
special_tokens_dict = {'sep_token': str('<|insep|>')}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
assert num_added_toks == 1
assert tokenizer.sep_token == '<|insep|>'
add_insep_token(tokenizer)
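# The <|insep|> token registered above as the tokenizer's sep_token is the
# separator placed between a user's individual messages when they are
# concatenated into blocks for human language modeling; adding it grows the
# vocabulary by one, which is why the embeddings are resized further below.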
if model_args.instantiate_hart:
hartbaseLMModel = HaRTBaseLMHeadModel.from_pretrained(model_args.model_name_or_path, config=config)
hartbaseLMModel.resize_token_embeddings(len(tokenizer))
model = HaRTPreTrainedModel(config, hartbaseLMModel)
### commented out the following code snippet based on: https://discuss.huggingface.co/t/perplexity-from-fine-tuned-gpt2lmheadmodel-with-and-without-lm-head-as-a-parameter/16602
# elif model_args.model_name_or_path and training_args.do_train: ## re-factor this code to use ArHuLM.from_pretrained, because lm_head is not being treated as a parameter
# model = AutoModelForCausalLM.from_pretrained(
# model_args.model_name_or_path,
# from_tf=bool(".ckpt" in model_args.model_name_or_path),
# config=config,
# cache_dir=model_args.cache_dir,
# revision=model_args.model_revision,
# use_auth_token=True if model_args.use_auth_token else None,
# )
elif model_args.model_name_or_path:
model = HaRTPreTrainedModel.from_pretrained(model_args.model_name_or_path)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
model.resize_token_embeddings(len(tokenizer))
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
#Dataset
if data_args.train_table is not None or data_args.dev_table is not None or data_args.test_table is not None:
if data_args.train_table is not None:
train_data, train_uncut_blocks = load_dataset(logger, tokenizer, data_args.train_table, block_size, data_args.max_train_blocks, data_args, 'train', data_args.disable_hulm_batching)
if data_args.dev_table is not None:
eval_data, eval_uncut_blocks = load_dataset(logger, tokenizer, data_args.dev_table, block_size, data_args.max_val_blocks, data_args, 'dev', data_args.disable_hulm_batching)
elif data_args.test_table is not None:
eval_data, eval_uncut_blocks = load_dataset(logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', data_args.disable_hulm_batching)
else:
raise ValueError("This CLM runner requires mysql database tables as train/dev/test data sources currently!")
train_dataset = train_data if training_args.do_train else None
eval_dataset = eval_data
# Data collator
# This will take care of collating batches of type [users, windows, num_tokens]
data_collator = DataCollatorWithPaddingForHaRT(model_args, config, tokenizer, training_args.deepspeed)
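# A minimal sketch of the expected collated shapes (illustrative only; the
# exact keys and padding behavior come from DataCollatorWithPaddingForHaRT):
#   batch["input_ids"].shape      -> (num_users, blocks_per_user, block_size)
#   batch["attention_mask"].shape -> (num_users, blocks_per_user, block_size)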
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval or training_args.do_predict else None,
tokenizer=tokenizer,
data_collator=data_collator,
callbacks=[evalLogsCallback] if training_args.do_train else None
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
metrics["train_blocks_per_sample"] = train_uncut_blocks if data_args.max_train_blocks is None else min(data_args.max_train_blocks, train_uncut_blocks)
metrics["block_size"] = block_size
metrics["gpus"] = training_args.n_gpu
metrics["total_epochs"] = training_args.num_train_epochs
metrics["per_device_train_batch_size"] = training_args.per_device_train_batch_size
metrics["train_table"] = data_args.train_table
if model_args.instantiate_hart:
metrics["history"] = model_args.add_history
metrics["extract_layer"] = model_args.extract_layer if model_args.extract_layer else config.extract_layer if config.extract_layer else None
metrics["layer_ins"] = model_args.layer_ins if model_args.layer_ins else config.layer_ins if config.layer_ins else None
if model_args.add_history:
metrics["0s_initial_history"] = False if model_args.initial_history else True
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
metrics["eval_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_eval_batch_size"] = training_args.per_device_eval_batch_size
metrics["is_dev"] = True if data_args.dev_table else False
metrics["eval_table"] = data_args.dev_table if data_args.dev_table else data_args.test_table
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
eval_dataset, eval_uncut_blocks = load_dataset(logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', data_args.disable_hulm_batching)
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
metrics["eval_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_eval_batch_size"] = training_args.per_device_eval_batch_size
metrics["is_dev"] = False
metrics["eval_table"] = data_args.test_table
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 14,575 | 44.55 | 192 | py |
HaRT | HaRT-main/run_ft_hart.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning HaRT for sequence classification."""
import logging
import os
import sys
from typing import Optional
import numpy as np
import torch.nn as nn
import transformers
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainerCallback,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process, IntervalStrategy
from args.ft_args import DataTrainingArguments, ModelArguments
from src.model.configuration_hart import HaRTConfig
from src.model.modeling_hart import HaRTBaseLMHeadModel
from src.model.hart import HaRTPreTrainedModel
from src.model.finetune_hart import HaRTForSequenceClassification
from data.utils_hart.ft_doc_disable_hulm_batching_data_utils import load_dataset as load_no_hulm_dataset
from data.utils_hart.ft_doc_data_utils import load_dataset as load_doc_dataset
from data.utils_hart.ft_user_data_utils import load_dataset as load_user_dataset
from data.data_collator import DataCollatorWithPaddingForHaRT
logger = logging.getLogger(__name__)
class EvalLogsCallback(TrainerCallback):
def on_evaluate(self, args, state, control, metrics, **kwargs):
if 'epoch' in metrics.keys() and control.should_save:
self.save_metrics('eval_{}'.format(metrics['epoch']), metrics, args)
elif not control.should_save and state.best_model_checkpoint:
metrics['best_model_checkpoint'] = state.best_model_checkpoint
metrics['best_model_metric'] = state.best_metric
elif 'epoch' not in metrics.keys():
self.save_metrics('eval_wo_epoch', metrics, args)
def save_metrics(self, split, metrics, args):
import json
path = os.path.join(args.output_dir, f"{split}_results.json")
with open(path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
class EarlyStoppingCallback(TrainerCallback):
"""
A :class:`~transformers.TrainerCallback` that handles early stopping.
Args:
early_stopping_patience (:obj:`int`):
Use with :obj:`metric_for_best_model` to stop training when the specified metric worsens for
:obj:`early_stopping_patience` evaluation calls.
early_stopping_threshold(:obj:`float`, `optional`):
Use with TrainingArguments :obj:`metric_for_best_model` and :obj:`early_stopping_patience` to denote how
much the specified metric must improve to satisfy early stopping conditions.
This callback depends on :class:`~transformers.TrainingArguments` argument `load_best_model_at_end` functionality
to set best_metric in :class:`~transformers.TrainerState`.
"""
def __init__(self, metric_for_early_stopping, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
self.metric_for_early_stopping = metric_for_early_stopping
self.prev_metric_value = None
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def check_metric_value(self, args, state, control, metric_value):
#TODO: use args.greater_is_better which is w.r.t. early stopping metric
# operator = np.greater if args.greater_is_better else np.less
operator = np.less
if self.prev_metric_value is None or (
operator(metric_value, self.prev_metric_value)
and abs(metric_value - self.prev_metric_value) >= self.early_stopping_threshold
):
self.early_stopping_patience_counter = 0
else:
self.early_stopping_patience_counter += 1
self.prev_metric_value = metric_value
def on_train_begin(self, args, state, control, **kwargs):
assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
assert (
self.metric_for_early_stopping is not None
), "EarlyStoppingCallback requires metric_for_early_stopping to be defined"
assert (
args.evaluation_strategy != IntervalStrategy.NO
), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"
def on_evaluate(self, args, state, control, metrics, **kwargs):
metric_to_check = self.metric_for_early_stopping
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics.get(metric_to_check)
if metric_value is None:
logger.warning(
f"early stopping required metric_for_early_stopping, but did not find {metric_to_check} so early stopping is disabled"
)
return
self.check_metric_value(args, state, control, metric_value)
if self.early_stopping_patience_counter >= self.early_stopping_patience:
control.should_training_stop = True
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(model_args.init_seed)
# Labels
# It can be a classification or a regression task
num_labels = data_args.num_labels
if num_labels > 1:
is_regression = False # classification task
else:
is_regression = True # regression task
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.load_non_PT_hulm_model:
config = HaRTConfig(
num_labels=num_labels,
finetuning_task=data_args.task_name if data_args.task_name is not None else data_args.task_type,
use_history_output=data_args.use_history_output
)
if model_args.add_history:
config.add_history = True
if model_args.use_qh05_wts:
config.use_qh05_wts = True
else:
config.use_qh05_wts = False
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
def add_insep_token(tokenizer):
special_tokens_dict = {'sep_token': str('<|insep|>')}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
assert num_added_toks == 1
assert tokenizer.sep_token == '<|insep|>'
add_insep_token(tokenizer)
else:
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name if data_args.task_name is not None else data_args.task_type,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.use_history_output=data_args.use_history_output
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.pad_token_id = tokenizer.eos_token_id
config.freeze_model = model_args.freeze_model
config.use_hart_no_hist = model_args.use_hart_no_hist
if training_args.do_train and not model_args.load_non_PT_hulm_model:
model = HaRTForSequenceClassification(config, model_args.model_name_or_path)
elif training_args.do_train and model_args.load_non_PT_hulm_model:
hartbaseLMModel = HaRTBaseLMHeadModel.from_pretrained(model_args.model_name_or_path, config=config)
hartbaseLMModel.resize_token_embeddings(len(tokenizer))
hart = HaRTPreTrainedModel(config, hartbaseLMModel)
model = HaRTForSequenceClassification(config, pt_model=hart)
elif training_args.do_eval and not training_args.do_train:
model = HaRTForSequenceClassification.from_pretrained(model_args.model_name_or_path)
else:
raise ValueError("You're neither training nor evaluating. Can't pick a model because I don't know what do you want to do.")
def freeze_params(model: nn.Module):
for par in model.parameters():
par.requires_grad = False
if model_args.freeze_model:
freeze_params(model.transformer)
if data_args.task_type=='user':
freeze_params(model.transformer.transformer)
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Preprocessed and padded datasets with labels
def load_dataset(args):
if data_args.task_type=='document':
if model_args.use_hart_no_hist:
return load_no_hulm_dataset(*args)
else:
return load_doc_dataset(*args)
elif data_args.task_type=='user':
return load_user_dataset(*args)
if data_args.train_table is not None or data_args.dev_table is not None or data_args.test_table is not None:
if data_args.train_table is not None:
args = [logger, tokenizer, data_args.train_table, block_size, data_args.max_train_blocks, data_args, 'train', data_args.disable_hulm_batching]
train_dataset, train_uncut_blocks = load_dataset(args)
if data_args.dev_table is not None:
args = [logger, tokenizer, data_args.dev_table, block_size, data_args.max_val_blocks, data_args, 'dev', data_args.disable_hulm_batching]
eval_dataset, eval_uncut_blocks = load_dataset(args)
elif data_args.test_table is not None:
args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', data_args.disable_hulm_batching]
eval_dataset, eval_uncut_blocks = load_dataset(args)
else:
raise ValueError("This FT runner requires train/dev/test data source paths currently!")
def compute_metrics(p: EvalPrediction):
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import scipy
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=-1)
if is_regression:
if data_args.save_preds_labels:
np.savetxt(training_args.output_dir +'/preds.txt', preds)
np.savetxt(training_args.output_dir + '/labels.txt', p.label_ids)
mse = ((preds - p.label_ids) ** 2).mean().item()
r_pear, p_value = scipy.stats.pearsonr(preds, p.label_ids)
# from https://www.aclweb.org/anthology/W18-0604.pdf
r_meas1 = 0.77
r_meas2 = 0.70
r_dis = r_pear/((r_meas1*r_meas2)**0.5)
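# Disattenuated correlation (correction for attenuation): the observed Pearson
# r is divided by the square root of the product of the two measures'
# reliabilities, r_dis = r_pear / sqrt(r_meas1 * r_meas2); the reliability
# values 0.77 and 0.70 are taken from the paper linked above.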
return {
'mse': mse,
'r_dis': r_dis,
'r_pear': r_pear,
'p_value': p_value
}
else:
indices = p.label_ids!=-100 # make sure to ignore the labels marked as -100
labels = p.label_ids[indices]
if not model_args.use_hart_no_hist:
preds = preds[indices]
if data_args.save_preds_labels:
np.savetxt(training_args.output_dir +'/preds.txt', preds, fmt='%d')
np.savetxt(training_args.output_dir + '/labels.txt', labels, fmt='%d')
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall
}
# Data collator
if not data_args.disable_hulm_batching:
# This one will take care of collating batches of type [users, blocks, block_size]
data_collator = DataCollatorWithPaddingForHaRT(model_args, config, tokenizer, is_ft=True, is_user_level_ft=data_args.task_type=='user')
else:
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator = default_data_collator
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
callbacks=[
EvalLogsCallback,
EarlyStoppingCallback(
model_args.metric_for_early_stopping,
model_args.early_stopping_patience,
model_args.early_stopping_threshold
)
] if training_args.do_train else None
)
# Training
if training_args.do_train:
checkpoint = None
if last_checkpoint is not None:
checkpoint = last_checkpoint
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics['model_seed'] = model_args.init_seed
metrics['train_seed'] = training_args.seed
metrics['lr'] = training_args.learning_rate
metrics['pretrained_model_loc'] = model_args.model_name_or_path
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
metrics["train_blocks_per_sample"] = train_uncut_blocks if data_args.max_train_blocks is None else min(data_args.max_train_blocks, train_uncut_blocks)
metrics["block_size"] = block_size
metrics["gpus"] = training_args.n_gpu
metrics["total_epochs"] = training_args.num_train_epochs
metrics["per_device_train_batch_size"] = training_args.per_device_train_batch_size
metrics["train_table"] = data_args.train_table
metrics["dev_table"] = data_args.dev_table
if config.add_history:
metrics["history"] = model_args.add_history
metrics["extract_layer"] = config.extract_layer if config.extract_layer else None
metrics["layer_ins"] = config.layer_ins if config.layer_ins else None
if model_args.add_history:
metrics["0s_initial_history"] = False if model_args.initial_history else True
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
if training_args.do_eval and data_args.dev_table is not None and not training_args.do_train:
if data_args.dev_table is not None:
logger.info("*** Evaluate Dev set ***")
eval_test('dev', data_args, training_args, eval_dataset, eval_uncut_blocks, trainer)
elif data_args.test_table is not None:
args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', data_args.disable_hulm_batching]
eval_dataset, eval_uncut_blocks = load_dataset(args)
logger.info("*** Evaluate Test set ***")
eval_test('test', data_args, training_args, eval_dataset, eval_uncut_blocks, trainer)
else:
raise ValueError("Expecting dev or test data to run eval.")
# Prediction
if training_args.do_predict:
if data_args.test_table is None:
raise ValueError("You are trying to predict on test data but forgot to provide a test data source path!")
if data_args.dev_table is not None:
args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', data_args.disable_hulm_batching]
eval_dataset, eval_uncut_blocks = load_dataset(args)
logger.info("*** Evaluate Test set ***")
eval_test('test', data_args, training_args, eval_dataset, eval_uncut_blocks, trainer)
def eval_test(test_type, data_args, training_args, eval_dataset, eval_uncut_blocks, trainer):
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = (
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
)
metrics["test_samples"] = min(max_eval_samples, len(eval_dataset))
metrics["test_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_test_batch_size"] = training_args.per_device_eval_batch_size
metrics["test_table"] = data_args.test_table
trainer.log_metrics("eval_{}".format(test_type), metrics)
trainer.save_metrics("eval_{}".format(test_type), metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 21,480 | 46.419426 | 158 | py |
HaRT | HaRT-main/run_clm_gpt2.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning GPT-2HLC for causal language modeling.
"""
import logging
import math
import os
import sys
from src.model_gpt2hlc.gpt2hlcLMhead import GPT2hlcLMHeadModel
from args.clm_args import DataTrainingArguments, ModelArguments
from data.utils_gpt2hlc.clm_data_utils import load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
TrainerCallback,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
class evalLogsCallback(TrainerCallback):
def on_evaluate(self, args, state, control, **kwargs):
if control.should_save:
metrics = kwargs['metrics']
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
self.save_metrics('eval_{}'.format(metrics['epoch']), metrics, args)
def save_metrics(self, split, metrics, args):
import json
path = os.path.join(args.output_dir, f"{split}_results.json")
with open(path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
tokenizer.pad_token = tokenizer.eos_token
config.pad_token_id = tokenizer.pad_token_id
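# GPT-2 ships without a padding token, so the end-of-sequence token is reused
# for padding and its id is propagated to the model config.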
if model_args.model_name_or_path:
if model_args.model_name_or_path == 'gpt2':
## using the following code snippet only for pre-trained gpt2 -- based on: https://discuss.huggingface.co/t/perplexity-from-fine-tuned-gpt2lmheadmodel-with-and-without-lm-head-as-a-parameter/16602
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = GPT2hlcLMHeadModel.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
model.resize_token_embeddings(len(tokenizer))
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
#Dataset
if data_args.train_table is not None or data_args.dev_table is not None or data_args.test_table is not None:
if data_args.train_table is not None:
train_data, train_uncut_blocks = load_dataset(logger, tokenizer, data_args.train_table, block_size, data_args.max_train_blocks, data_args, 'train')
if data_args.dev_table is not None:
eval_data, eval_uncut_blocks = load_dataset(logger, tokenizer, data_args.dev_table, block_size, data_args.max_val_blocks, data_args, 'dev')
elif data_args.test_table is not None:
eval_data, eval_uncut_blocks = load_dataset(logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test')
else:
raise ValueError("This CLM runner requires mysql database tables as train/dev/test data sources currently!")
train_dataset = train_data if training_args.do_train else None
eval_dataset = eval_data
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval or training_args.do_predict else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
callbacks=[evalLogsCallback] if training_args.do_train else None
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
metrics["train_blocks_per_sample"] = train_uncut_blocks if data_args.max_train_blocks is None else min(data_args.max_train_blocks, train_uncut_blocks)
metrics["block_size"] = block_size
metrics["gpus"] = training_args.n_gpu
metrics["total_epochs"] = training_args.num_train_epochs
metrics["per_device_train_batch_size"] = training_args.per_device_train_batch_size
metrics["train_table"] = data_args.train_table
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval or training_args.do_predict:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
metrics["eval_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_eval_batch_size"] = training_args.per_device_eval_batch_size
metrics["is_dev"] = True if data_args.dev_table else False
metrics["eval_table"] = data_args.dev_table if data_args.dev_table else data_args.test_table
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 12,138 | 42.823105 | 210 | py |
HaRT | HaRT-main/run_ft_gpt2hlc.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning GPT-2HLC for sequence classification."""
import logging
import os
import sys
from typing import Optional
import numpy as np
import pandas as pd
import torch.nn as nn
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainerCallback,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process, IntervalStrategy
from src.user_ft_trainer import Trainer as user_Trainer
from args.ft_args import DataTrainingArguments, ModelArguments
from src.model_gpt2hlc.finetune_gpt2hlc import GPT2hlcForSequenceClassification
from data.utils_hart.ft_doc_disable_hulm_batching_data_utils import load_dataset as load_no_hulm_dataset
from data.utils_gpt2hlc.ft_user_data_utils_gpt2hlc import load_dataset as load_user_dataset
from data.data_collator import user_default_data_collator
logger = logging.getLogger(__name__)
class EvalLogsCallback(TrainerCallback):
def on_evaluate(self, args, state, control, metrics, **kwargs):
if 'epoch' in metrics.keys() and control.should_save:
self.save_metrics('eval_{}'.format(metrics['epoch']), metrics, args)
elif not control.should_save and state.best_model_checkpoint:
metrics['best_model_checkpoint'] = state.best_model_checkpoint
metrics['best_model_metric'] = state.best_metric
elif 'epoch' not in metrics.keys():
self.save_metrics('eval_wo_epoch', metrics, args)
def save_metrics(self, split, metrics, args):
import json
path = os.path.join(args.output_dir, f"{split}_results.json")
with open(path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
class EarlyStoppingCallback(TrainerCallback):
"""
A :class:`~transformers.TrainerCallback` that handles early stopping.
Args:
early_stopping_patience (:obj:`int`):
Use with :obj:`metric_for_best_model` to stop training when the specified metric worsens for
:obj:`early_stopping_patience` evaluation calls.
early_stopping_threshold(:obj:`float`, `optional`):
Use with TrainingArguments :obj:`metric_for_best_model` and :obj:`early_stopping_patience` to denote how
much the specified metric must improve to satisfy early stopping conditions.
This callback depends on :class:`~transformers.TrainingArguments` argument `load_best_model_at_end` functionality
to set best_metric in :class:`~transformers.TrainerState`.
"""
def __init__(self, metric_for_early_stopping, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
self.metric_for_early_stopping = metric_for_early_stopping
self.prev_metric_value = None
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def check_metric_value(self, args, state, control, metric_value):
#TODO: use args.greater_is_better which is w.r.t. early stopping metric
# operator = np.greater if args.greater_is_better else np.less
operator = np.less
if self.prev_metric_value is None or (
operator(metric_value, self.prev_metric_value)
and abs(metric_value - self.prev_metric_value) >= self.early_stopping_threshold
):
self.early_stopping_patience_counter = 0
else:
self.early_stopping_patience_counter += 1
self.prev_metric_value = metric_value
def on_train_begin(self, args, state, control, **kwargs):
assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
assert (
self.metric_for_early_stopping is not None
), "EarlyStoppingCallback requires metric_for_early_stopping to be defined"
assert (
args.evaluation_strategy != IntervalStrategy.NO
), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"
def on_evaluate(self, args, state, control, metrics, **kwargs):
metric_to_check = self.metric_for_early_stopping
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics.get(metric_to_check)
if metric_value is None:
logger.warning(
f"early stopping required metric_for_early_stopping, but did not find {metric_to_check} so early stopping is disabled"
)
return
self.check_metric_value(args, state, control, metric_value)
if self.early_stopping_patience_counter >= self.early_stopping_patience:
control.should_training_stop = True
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Labels
# It can be a classification or a regression task
num_labels = data_args.num_labels
if num_labels > 1:
is_regression = False # classification task
else:
is_regression = True # regression task
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name if data_args.task_name is not None else data_args.task_type,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=False if 'bertweet' in model_args.model_name_or_path else model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
add_prefix_space=None,
)
tokenizer.pad_token = tokenizer.pad_token if 'bertweet' in model_args.model_name_or_path else tokenizer.eos_token
config.pad_token_id = tokenizer.pad_token_id
config.freeze_model = model_args.freeze_model
if data_args.task_type=='document':
if 'bertweet' in model_args.model_name_or_path:
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = GPT2hlcForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
elif data_args.task_type=='user':
config.add_history = None
def add_insep_token(tokenizer):
special_tokens_dict = {'sep_token': str('<|insep|>')}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
assert num_added_toks == 1
assert tokenizer.sep_token == '<|insep|>'
if tokenizer.sep_token is None:
add_insep_token(tokenizer)
model = GPT2hlcForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
def freeze_params(model: nn.Module):
for par in model.parameters():
par.requires_grad = False
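        # Freeze the token/position embeddings, dropout, the first 10 transformer blocks, and the final layer norm,
        # leaving only the top transformer blocks and the classification head trainable for user-level fine-tuning.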
modules = [
model.transformer.wte,
model.transformer.wpe,
model.transformer.drop,
model.transformer.h[:10],
model.transformer.ln_f
]
for x in modules:
freeze_params(x)
def freeze_params(model: nn.Module):
for par in model.parameters():
par.requires_grad = False
if model_args.freeze_model:
freeze_params(model.transformer)
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Preprocessed and padded datasets with labels
def load_dataset(args):
if data_args.task_type=='document':
return load_no_hulm_dataset(*args)
elif data_args.task_type=='user':
return load_user_dataset(*args)
if data_args.train_table is not None or data_args.dev_table is not None or data_args.test_table is not None:
if data_args.train_table is not None:
args = [logger, tokenizer, data_args.train_table, block_size, data_args.max_train_blocks, data_args, 'train', True]
train_dataset, train_uncut_blocks = load_dataset(args)
if data_args.dev_table is not None:
args = [logger, tokenizer, data_args.dev_table, block_size, data_args.max_val_blocks, data_args, 'dev', True]
eval_dataset, eval_uncut_blocks = load_dataset(args)
elif data_args.test_table is not None:
args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', True]
eval_dataset, eval_uncut_blocks = load_dataset(args)
else:
raise ValueError("This FT runner requires mysql database tables as train/dev/test data sources currently!")
def compute_metrics(p: EvalPrediction):
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import scipy
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=-1)
if hasattr(p, 'user_ids'):
user_mapper = pd.DataFrame(p.user_ids, columns=['user_id'])
user_mapper['preds'] = preds
user_mapper['labels'] = p.label_ids
assert len(preds) == len(p.label_ids) == len(p.user_ids), "Mismatch in the number of user_ids, predictions and labels!"
user_mapper = user_mapper.groupby('user_id').agg({'preds':'mean', 'labels':'mean'}).reset_index()
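            # Document-level predictions and labels are averaged per user so metrics are computed at the user level.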
if data_args.save_preds_labels:
np.savetxt(training_args.output_dir +'/preds.txt', user_mapper['preds'].to_numpy())
np.savetxt(training_args.output_dir + '/labels.txt', user_mapper['labels'].to_numpy())
if is_regression:
mse = ((user_mapper.preds - user_mapper.labels) ** 2).mean().item()
r_pear, p_value = scipy.stats.pearsonr(user_mapper.preds, user_mapper.labels)
# from https://www.aclweb.org/anthology/W18-0604.pdf
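                # Disattenuated Pearson r: r_pear divided by the geometric mean of the two measures' reliabilities (r_meas1, r_meas2).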
r_meas1 = 0.77
r_meas2 = 0.70
r_dis = r_pear/((r_meas1*r_meas2)**0.5)
return {
'mse': mse,
'r_dis': r_dis,
'r_pear':r_pear,
'p_value': p_value
}
else:
indices = p.label_ids!=-100 # make sure to ignore the labels marked as -100
labels = p.label_ids[indices]
preds = preds[indices]
if data_args.save_preds_labels:
np.savetxt(training_args.output_dir +'/preds.txt', preds, fmt='%d')
np.savetxt(training_args.output_dir + '/labels.txt', labels, fmt='%d')
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall
}
# Data collator
if data_args.task_type=='user':
# This one will take care of collating batches with user_ids, labels, input_ids and attention_mask
data_collator = user_default_data_collator
else:
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator = default_data_collator
# Initialize our Trainer
if data_args.task_type=='document':
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
callbacks=[
EvalLogsCallback,
EarlyStoppingCallback(
model_args.metric_for_early_stopping,
model_args.early_stopping_patience,
model_args.early_stopping_threshold
)
] if training_args.do_train else None
)
elif data_args.task_type=='user':
trainer = user_Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
callbacks=[
EvalLogsCallback,
EarlyStoppingCallback(
model_args.metric_for_early_stopping,
model_args.early_stopping_patience,
model_args.early_stopping_threshold
)
] if training_args.do_train else None
)
# Training
if training_args.do_train:
        checkpoint = last_checkpoint if last_checkpoint is not None else None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics['seed'] = training_args.seed
metrics['pretrained_model_loc'] = model_args.model_name_or_path
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
metrics["train_blocks_per_sample"] = train_uncut_blocks if data_args.max_train_blocks is None else min(data_args.max_train_blocks, train_uncut_blocks)
metrics["block_size"] = block_size
metrics["gpus"] = training_args.n_gpu
metrics["total_epochs"] = training_args.num_train_epochs
metrics["per_device_train_batch_size"] = training_args.per_device_train_batch_size
metrics["train_table"] = data_args.train_table
metrics["dev_table"] = data_args.dev_table
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
if training_args.do_eval and not training_args.do_train:
if data_args.dev_table is not None:
logger.info("*** Evaluate Dev set ***")
eval_test('dev', data_args, training_args, eval_dataset, eval_uncut_blocks, trainer)
elif data_args.test_table is not None:
            args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', True]
eval_dataset, eval_uncut_blocks = load_dataset(args)
logger.info("*** Evaluate Test set ***")
eval_test('test', data_args, training_args, eval_dataset, eval_uncut_blocks, trainer)
else:
raise ValueError("Expecting dev or test data to run eval.")
    # Prediction on the test set
if training_args.do_predict:
if data_args.test_table is None:
raise ValueError("You are trying to predict on test data but forgot to provide a test data source path!")
if data_args.dev_table is not None:
args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', True]
eval_dataset, eval_uncut_blocks = load_dataset(args)
logger.info("*** Evaluate Test set ***")
eval_test('test', data_args, training_args, eval_dataset, eval_uncut_blocks, trainer)
def eval_test(test_type, data_args, training_args, eval_dataset, eval_uncut_blocks, trainer):
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = (
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
)
metrics["test_samples"] = min(max_eval_samples, len(eval_dataset))
metrics["test_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_test_batch_size"] = training_args.per_device_eval_batch_size
metrics["test_table"] = data_args.test_table
trainer.log_metrics("eval_{}".format(test_type), metrics)
trainer.save_metrics("eval_{}".format(test_type), metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 21,952 | 44.926778 | 158 | py |
HaRT | HaRT-main/extra_utils/save_user_states.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import logging
import os
import sys
import transformers
from transformers import (
AutoConfig,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from args.ft_args import DataTrainingArguments, ModelArguments
from src.model.configuration_hart import HaRTConfig
from src.model.hart import HaRTPreTrainedModel
from extra_utils.finetune_to_save_states import ArHulmForSequenceClassification
from data.utils_hart.ft_doc_data_utils import load_dataset as load_doc_dataset
from data.utils_hart.ft_user_data_utils import load_dataset as load_user_dataset
from data.data_collator import DataCollatorWithPaddingForHaRT
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
# check_min_version("4.6.0.dev0")
logger = logging.getLogger(__name__)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(model_args.init_seed)
# Labels
# For document level tasks like stance detection, provide num_labels for classification
# For user level tasks, num_labels is set to 1, making it a regression task
if data_args.task_type=='document':
is_regression = False
num_labels = data_args.num_labels
else:
is_regression = True
num_labels = 1 #regression task
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.load_non_PT_hulm_model:
config = HaRTConfig(num_labels=num_labels, use_history_output=data_args.use_history_output)
if model_args.add_history:
config.add_history = True
if model_args.use_qh05_wts:
config.use_qh05_wts = True
else:
config.use_qh05_wts = False
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
def add_insep_token(tokenizer):
special_tokens_dict = {'sep_token': str('<|insep|>')}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
assert num_added_toks == 1
assert tokenizer.sep_token == '<|insep|>'
add_insep_token(tokenizer)
else:
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.use_history_output=data_args.use_history_output
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.pad_token_id = tokenizer.eos_token_id
arhulm = HaRTPreTrainedModel.from_pretrained(model_args.model_name_or_path)
model = ArHulmForSequenceClassification(config, training_args.output_dir, data_args.agg_type, arhulm)
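    # This wrapper forwards data through the pretrained HaRT model and writes the aggregated per-user states to CSV
    # (see extra_utils/finetune_to_save_states.py); no classification head is trained here.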
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Preprocessed and padded datasets with labels
def load_dataset(args):
if data_args.task_type=='document':
return load_doc_dataset(*args)
elif data_args.task_type=='user':
return load_user_dataset(*args)
# Data collator
if not data_args.disable_hulm_batching:
# This one will take care of collating batches of type [users, blocks, block_size]
data_collator = DataCollatorWithPaddingForHaRT(model_args, config, tokenizer, is_ft=True, is_user_level_ft=data_args.task_type=='user')
else:
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator = default_data_collator
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
tokenizer=tokenizer,
data_collator=data_collator,
)
table = data_args.train_table if data_args.train_table else data_args.dev_table if data_args.dev_table else data_args.test_table
data_type = 'train' if data_args.train_table else 'dev' if data_args.dev_table else 'test'
args = [logger, tokenizer, table, block_size, data_args.max_val_blocks, data_args, data_type, data_args.disable_hulm_batching]
eval_dataset, eval_uncut_blocks = load_dataset(args)
logger.info("*** Evaluate all test set ***")
metrics = trainer.evaluate(eval_dataset=eval_dataset)
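    # evaluate() is used only to run the data through the model; the user states are written to CSV inside the model's forward pass.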
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9,356 | 43.557143 | 143 | py |
HaRT | HaRT-main/extra_utils/finetune_user_gpt2_save_states.py | from more_itertools import split_at
import pandas as pd
import torch
import torch.nn as nn
from torch.nn import MSELoss
from transformers.utils import logging
from transformers import GPT2PreTrainedModel, GPT2Model
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
logger = logging.get_logger(__name__)
class GPT2ForSequenceClassification(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config, tokenizer, args, save_user_states=False):
super().__init__(config)
self.tokenizer = tokenizer
self.config = config
self.args = args
self.save_user_states = save_user_states
self.num_labels = config.num_labels
self.transformer = GPT2Model(config)
# self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
# self.transform = nn.Linear(config.n_embd, config.n_embd)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def process_into_messages(self, data):
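        # Split a user's concatenated token stream back into individual messages (delimited by the <|insep|> sep token),
        # then truncate or pad each message to a fixed length of 200 tokens.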
def pad_message(data, pad_value):
for i,x in enumerate(data):
if len(x) > 200:
x = x[0:200]
data[i] = x
else:
x.extend([pad_value]*(200-len(x)))
return data
def pad_and_collate_data(data):
i_values = data
a_values = [[1]*len(x) for x in i_values]
i_values = pad_message(i_values, self.tokenizer.eos_token_id)
a_values = pad_message(a_values, 0)
return i_values, a_values
def split_into_messages(data):
i_values = data.reshape(-1).tolist()
i_values = list(split_at(i_values, lambda x:x==self.tokenizer.eos_token_id))[0]
i_values = i_values[:-1] if i_values[-1]==self.tokenizer.sep_token_id else i_values
i_values = list(split_at(i_values, lambda x:x==self.tokenizer.sep_token_id))
return i_values
input_ids = split_into_messages(data)
input_ids, attention_mask = pad_and_collate_data(input_ids)
return input_ids, attention_mask, len(input_ids)
def _prepare_inputs(self, input):
"""
Prepare :obj:`inputs` before feeding them to the model, converting tensors to cuda tensors
"""
if isinstance(input, torch.Tensor):
input = input.to(self.args.device)
return input
def get_user_embedding(self, hidden_states, attention_mask, user_num_msgs):
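        # For each message, sum the masked token states; the user embedding is the mean of these per-message sums
        # across all of the user's messages.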
user_hs_splits = torch.split(hidden_states, user_num_msgs)
user_attn_splits = torch.split(attention_mask, user_num_msgs)
assert len(user_hs_splits) == len(user_attn_splits)
user_states = None
for states, masks in zip(user_hs_splits, user_attn_splits):
masks = masks.unsqueeze(dim=2)
masked_states = states*masks
summed_msg_hs = torch.sum(masked_states, dim=1)
summed_msg_masks = torch.sum(masks, dim=1)
message_states = summed_msg_hs/summed_msg_masks
summed_user_states = torch.sum(summed_msg_hs, dim=0)
num_msgs = message_states.shape[0]
averaged_user_states = summed_user_states/num_msgs
if user_states is None:
user_states = averaged_user_states.unsqueeze(dim=0)
else:
user_states = torch.cat([user_states, averaged_user_states.unsqueeze(dim=0)])
return user_states
def forward(
self,
input_ids=None,
user_ids=None,
history=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
user_num_msgs = []
if user_ids is not None:
user_messages = torch.Tensor()
user_msg_attn = torch.Tensor()
for u_input_ids in input_ids:
ids, attn, num_msgs = self.process_into_messages(u_input_ids)
user_num_msgs.append(num_msgs)
ids = torch.Tensor(ids)
attn = torch.Tensor(attn)
user_messages = torch.cat([user_messages, ids])
user_msg_attn = torch.cat([user_msg_attn, attn])
input_ids = self._prepare_inputs(user_messages).long()
attention_mask = self._prepare_inputs(user_msg_attn).long()
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# hidden_states = self.transform(self.ln_f(hidden_states))
if user_ids is not None:
user_states = self.get_user_embedding(hidden_states, attention_mask, user_num_msgs)
if self.save_user_states:
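                # Flatten the [num_users x hidden_size] state matrix into long format (user_id, column_number, value)
                # and write one CSV per batch.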
users = pd.DataFrame(user_ids.cpu().detach().numpy(), columns=['user_id'])
                users = users.loc[users.index.repeat(self.config.n_embd)]  # repeat each user_id once per hidden dimension
users.reset_index(drop=True, inplace=True)
user_states = pd.DataFrame(user_states.cpu().detach().numpy())
user_states = user_states.stack().reset_index()
user_states['level_0'] = users['user_id']
user_states.rename(columns={'level_0':'user_id','level_1': 'column_number', 0:'value'}, inplace=True)
user_states.to_csv(self.args.output_dir + '/test_states_' + str(user_ids[0].item()) + '.csv', index=False)
logits = user_states
                loss = torch.Tensor([0.1]).cuda()  # dummy placeholder loss; this path only saves user states
else:
# logits = self.score(self.transform(self.ln_f(hidden_states)))
logits = self.score(user_states)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.to(self.dtype).view(-1))
else:
raise ValueError("You're in the wrong finetuner!")
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
| 7,761 | 39.217617 | 119 | py |
HaRT | HaRT-main/extra_utils/save_user_states_gpt2.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import logging
import os
import sys
import transformers
from transformers import (
AutoConfig,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from args.ft_args import DataTrainingArguments, ModelArguments
from extra_utils.finetune_user_gpt2_save_states import GPT2ForSequenceClassification
from data.utils_hart.ft_doc_data_utils import load_dataset as load_doc_dataset
from data.utils_hart.ft_user_data_utils import load_dataset as load_user_dataset
from data.data_collator import DataCollatorWithPaddingForHaRT
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
# check_min_version("4.6.0.dev0")
logger = logging.getLogger(__name__)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(model_args.init_seed)
# Labels
# For document level tasks like stance detection, provide num_labels for classification
# For user level tasks, num_labels is set to 1, making it a regression task
if data_args.task_type=='document':
is_regression = False
num_labels = data_args.num_labels
else:
is_regression = True
num_labels = 1 #regression task
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name if data_args.task_name is not None else data_args.task_type,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer.pad_token = tokenizer.eos_token
config.pad_token_id = tokenizer.pad_token_id
config.add_history = None
model = GPT2ForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
args=training_args,
tokenizer=tokenizer,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
save_user_states=True
)
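    # save_user_states=True makes the model dump averaged user embeddings to CSV during trainer.evaluate() below.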
def add_insep_token(tokenizer):
special_tokens_dict = {'sep_token': str('<|insep|>')}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
assert num_added_toks == 1
assert tokenizer.sep_token == '<|insep|>'
add_insep_token(tokenizer)
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Preprocessed and padded datasets with labels
def load_dataset(args):
if data_args.task_type=='document':
return load_doc_dataset(*args)
elif data_args.task_type=='user':
return load_user_dataset(*args)
# Data collator
if not data_args.disable_hulm_batching:
# This one will take care of collating batches of type [users, blocks, block_size]
data_collator = DataCollatorWithPaddingForHaRT(model_args, config, tokenizer, is_ft=True, is_user_level_ft=data_args.task_type=='user')
else:
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator = default_data_collator
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
tokenizer=tokenizer,
data_collator=data_collator,
)
table = data_args.train_table if data_args.train_table else data_args.dev_table if data_args.dev_table else data_args.test_table
data_type = 'train' if data_args.train_table else 'dev' if data_args.dev_table else 'test'
args = [logger, tokenizer, table, block_size, data_args.max_val_blocks, data_args, data_type, data_args.disable_hulm_batching]
eval_dataset, eval_uncut_blocks = load_dataset(args)
logger.info("*** Evaluate all test set ***")
metrics = trainer.evaluate(eval_dataset=eval_dataset)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 8,810 | 42.835821 | 143 | py |
HaRT | HaRT-main/extra_utils/finetune_to_save_states.py | import pandas as pd
import torch
import torch.nn as nn
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
from src.model.hart import HaRTPreTrainedModel
class ArHulmForSequenceClassification(HaRTPreTrainedModel):
# _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config, output_dir, agg_type, arhulm=None):
super().__init__(config)
self.num_labels = config.num_labels
self.use_history_output = config.use_history_output
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
self.output_dir = output_dir
self.agg_type = agg_type
if arhulm:
self.transformer = arhulm
else:
self.transformer = HaRTPreTrainedModel(config)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def forward(
self,
input_ids=None,
user_ids=None,
history=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
history=history,
output_block_last_hidden_states=True,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
users = pd.DataFrame(user_ids.cpu().detach().numpy(), columns=['user_id'])
users = users.loc[users.index.repeat(768)]
users.reset_index(drop=True, inplace=True)
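        # Aggregate per-block user states into a single vector per user according to agg_type:
        # last block, sum, or average over blocks, or their masked variants that ignore padded blocks.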
if self.agg_type=='last':
user_states = transformer_outputs.history[0][-1]
elif self.agg_type=='sum':
all_blocks_user_states = torch.stack(transformer_outputs.history[0], dim=1)
user_states = torch.sum(all_blocks_user_states, dim=1)
elif self.agg_type=='avg':
all_blocks_user_states = torch.stack(transformer_outputs.history[0], dim=1)
user_states = torch.sum(all_blocks_user_states, dim=1)/all_blocks_user_states.shape[1]
elif self.agg_type=='masked_last':
states = transformer_outputs.history[0]
masks = transformer_outputs.history[1]
multiplied = tuple(l * r for l, r in zip(states, masks))
all_blocks_user_states = torch.stack(multiplied, dim=1).cpu().detach()
all_blocks_masks = torch.stack(masks, dim=1)
divisor = torch.sum(all_blocks_masks, dim=1).cpu().detach()
user_states = all_blocks_user_states[range(all_blocks_user_states.shape[0]), divisor.squeeze()-1]
elif self.agg_type=='masked_sum':
states = transformer_outputs.history[0]
masks = transformer_outputs.history[1]
multiplied = tuple(l * r for l, r in zip(states, masks))
all_blocks_user_states = torch.stack(multiplied, dim=1)
user_states = torch.sum(all_blocks_user_states, dim=1)
elif self.agg_type=='masked_avg':
states = transformer_outputs.history[0]
masks = transformer_outputs.history[1]
multiplied = tuple(l * r for l, r in zip(states, masks))
all_blocks_user_states = torch.stack(multiplied, dim=1)
all_blocks_masks = torch.stack(masks, dim=1)
sum = torch.sum(all_blocks_user_states, dim=1)
divisor = torch.sum(all_blocks_masks, dim=1)
user_states = sum/divisor
logits = user_states
loss = torch.Tensor([0.1]).cuda()
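        # Dummy placeholder loss: this wrapper is only run at inference time to dump user states, never to train.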
user_states = pd.DataFrame(user_states.cpu().detach().numpy())
user_states = user_states.stack().reset_index()
user_states['level_0'] = users['user_id']
user_states.rename(columns={'level_0':'user_id','level_1': 'column_number', 0:'value'}, inplace=True)
user_states.to_csv(self.output_dir + '/test_states_' + str(user_ids[0].item()) + '.csv', index=False)
return SequenceClassifierOutputWithPast(
loss=loss,
logits=logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
| 5,303 | 42.834711 | 119 | py |
HaRT | HaRT-main/src/modeling_outputs.py | from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from transformers.file_utils import ModelOutput
@dataclass
class HaRTOutput(ModelOutput):
"""
Superset of ArHulmCausalLMOutput with history in the output additionally.
The description of ArHuLMOutput is as follows which also has copied description of CausalLMOutputWithCrossAttentions from Hugging Face's transformers.modeling_outputs.py:
Base class for Auto regressive Human language model outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
# logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
# Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
all_blocks_last_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_block_last_hidden_states=True`` is passed or when ``config.output_block_last_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of each block's last hidden state)
of shape :obj:`(batch_size, blocks_length, sequence_length, hidden_size)`.
Last hidden-states of the model's blocks at the output of last layer for each block.
all_blocks_extract_layer_hs (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_block_last_hidden_states=True`` is passed or when ``config.output_block_last_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of each block's last hidden state)
of shape :obj:`(batch_size, blocks_length, sequence_length, hidden_size)`.
Extract Layer's hidden-states of the model's blocks at the output of last layer for each block.
history (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`. where each sequence length for a respective batch instance will have the same hidden_embeds.
Residual history of the users in the batch by the end of the model processing after applying recurrence throughout.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Cross attentions weights after the attention softmax, used to compute the weighted average in the
cross-attention heads.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`torch.FloatTensor` tuples of length :obj:`config.n_layers`, with each tuple containing the
cached key, value states of the self-attention and the cross-attention layers if model is used in
encoder-decoder setting. Only relevant if ``config.is_decoder = True``.
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
# extract_layer_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``extract_layer is not None`` (i.e, a value is passed) or when ``config.extract_layer has a value other than None``):
# Tuple of :obj:`torch.FloatTensor` (one for the output of (each) extract layer) -- currently takes in 1 value of extract_layer
# of shape :obj:`(batch_size, sequence_length, hidden_size)`.
# Hidden-states of the model at the output of extract layer.
"""
loss: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
history: torch.FloatTensor = None
all_blocks_last_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
all_blocks_extract_layer_hs: Optional[Tuple[torch.FloatTensor]] = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
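# A minimal usage sketch (illustrative only; the exact call signature depends on the calling model):
#   out = hart_model(input_ids=..., history=..., output_block_last_hidden_states=True)
#   user_states = out.history                        # recurrent per-user representation
#   block_states = out.all_blocks_extract_layer_hs   # per-block extract-layer hidden states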
@dataclass
class HaRTBaseModelOutput(ModelOutput):
"""
Overridden BaseModelOutputWithPastAndCrossAttentions to add extract_layer hidden states to the output for AR HuLM model.
The description of BaseModelOutputWithPastAndCrossAttentions is as follows copied from Hugging Face's transformers.modeling_outputs.py:
[Adding an arg 'extract_layer_hidden_states' in the description]
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
extract_layer_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``extract_layer is not None`` (i.e, a value is passed) or when ``config.extract_layer has a value other than None``):
Tuple of :obj:`torch.FloatTensor` (one for the output of (each) extract layer) -- currently takes in 1 value of extract_layer
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of extract layer.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
extract_layer_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class HaRTBaseCausalLMOutput(ModelOutput):
"""
Overridden CausalLMOutputWithCrossAttentions to add extract_layer hidden states to the output for AR HuLM model.
The description of CausalLMOutputWithCrossAttentions is as follows copied from Hugging Face's transformers.modeling_outputs.py:
[Adding an arg 'extract_layer_hidden_states' in the description]
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Cross attentions weights after the attention softmax, used to compute the weighted average in the
cross-attention heads.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`torch.FloatTensor` tuples of length :obj:`config.n_layers`, with each tuple containing the
cached key, value states of the self-attention and the cross-attention layers if model is used in
encoder-decoder setting. Only relevant if ``config.is_decoder = True``.
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
extract_layer_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``extract_layer is not None`` (i.e, a value is passed) or when ``config.extract_layer has a value other than None``):
Tuple of :obj:`torch.FloatTensor` (one for the output of (each) extract layer) -- currently takes in 1 value of extract_layer
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of extract layer.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
extract_layer_hidden_states: Optional[Tuple[torch.FloatTensor]] = None | 14,216 | 70.80303 | 215 | py |
HaRT | HaRT-main/src/user_ft_trainer.py | import collections
import numpy as np
import torch
import torch.nn as nn
from packaging import version
from typing import NamedTuple, Any, Dict, List, Optional, Tuple, Union
from torch.utils.data.dataloader import DataLoader
from transformers import Trainer
from transformers.trainer_pt_utils import (
DistributedTensorGatherer,
SequentialDistributedSampler,
nested_detach,
nested_concat
)
from transformers.trainer_utils import PredictionOutput, denumpify_detensorize
from transformers.file_utils import is_torch_tpu_available
from transformers.utils import logging
# torch_xla is only available in TPU environments; pl.ParallelLoader is used in prediction_loop below
if is_torch_tpu_available():
    import torch_xla.distributed.parallel_loader as pl
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.get_logger(__name__)
class EvalPrediction(NamedTuple):
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (:obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray`): Targets to be matched.
        user_ids (:obj:`np.ndarray`): User ids aligned with the predictions, used for user-level aggregation of metrics.
"""
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: np.ndarray
user_ids: np.ndarray
class Trainer(Trainer):
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
if self.args.deepspeed and not self.args.do_train:
# no harm, but flagging to the user that deepspeed config is ignored for eval
# flagging only for when --do_train wasn't passed as only then it's redundant
logger.info("Detected the deepspeed argument but it will not be used for evaluation")
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, half it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
users_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
# The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
# a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
users_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels, users = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
if inputs.get('user_ids') is not None and inputs['user_ids'] is not None:
users_host = users if users_host is None else nested_concat(users_host, users, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
users_gatherer.add_arrays(self._gather_and_numpify(users_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host, users_host = None, None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
users_gatherer.add_arrays(self._gather_and_numpify(users_host, "eval_user_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
user_ids = users_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids, user_ids=user_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`Lst[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
            Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits, labels and user ids (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
has_users = inputs.get('user_ids') is not None
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
if has_users:
users = nested_detach(tuple(inputs.get(name) for name in ['user_ids']))
if len(users) == 1:
users = users[0]
else:
users = None
with torch.no_grad():
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels, users)
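# Illustrative sketch of how the extended 4-tuple returned by `prediction_step`
# can be consumed inside the evaluation loop above; `trainer` and `batch` are
# assumed names used only for demonstration.
#
#   loss, logits, labels, users = trainer.prediction_step(
#       trainer.model, batch, prediction_loss_only=False
#   )
#   # `users` carries the batch's user_ids so predictions can later be grouped
#   # per user when computing user-level metrics.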
| 11,401 | 44.608 | 124 | py |
HaRT | HaRT-main/src/model_gpt2hlc/finetune_gpt2hlc.py | import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.utils import logging
from transformers import GPT2PreTrainedModel, GPT2Model
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
logger = logging.get_logger(__name__)
class GPT2hlcForSequenceClassification(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.freeze_model = config.freeze_model
self.num_labels = config.num_labels
self.finetuning_task = config.finetuning_task
self.transformer = GPT2Model(config)
if self.finetuning_task=='age':
self.transform = nn.Linear(config.n_embd, config.n_embd)
if not self.freeze_model and not self.finetuning_task=='ope':
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def forward(
self,
input_ids=None,
user_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=True if self.freeze_model else output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs.hidden_states[11] if self.freeze_model else transformer_outputs[0]
if self.freeze_model or self.finetuning_task=='ope' or self.finetuning_task=='user':
logits = self.score(hidden_states)
elif self.finetuning_task=='age':
            logits = self.score(self.transform(self.ln_f(hidden_states)))
else:
logits = self.score(self.ln_f(hidden_states))
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
assert (
self.config.pad_token_id is not None or batch_size == 1
), "Cannot handle batch sizes > 1 if no padding token is defined."
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
else:
sequence_lengths = -1
logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[range(batch_size), sequence_lengths]
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
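# Illustrative usage sketch; the checkpoint name, label count, and example text
# are assumptions.
#
#   from transformers import AutoConfig, AutoTokenizer
#   config = AutoConfig.from_pretrained("gpt2", num_labels=2)
#   config.freeze_model = False           # score over layer-normed final states
#   config.pad_token_id = 50256           # GPT-2 defines no pad token by default
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   tokenizer.pad_token = tokenizer.eos_token
#   model = GPT2hlcForSequenceClassification.from_pretrained("gpt2", config=config)
#   batch = tokenizer(["an example message"], return_tensors="pt", padding=True)
#   outputs = model(**batch, labels=torch.tensor([1]))
#   outputs.loss, outputs.logits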
| 4,886 | 37.785714 | 119 | py |
HaRT | HaRT-main/src/model_gpt2hlc/gpt2hlcLMhead.py | import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from transformers.utils import logging
from transformers import GPT2PreTrainedModel, GPT2Model
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
logger = logging.get_logger(__name__)
class GPT2hlcLMHeadModel(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
)
# @staticmethod
# def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
# """
# This function is used to re-order the :obj:`past_key_values` cache if
# :meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
# called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
# """
# return tuple(
# tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
# for layer_past in past
# )
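# Illustrative sketch of causal LM fine-tuning with labels tied to the inputs,
# as described in the forward() docstring above; the checkpoint name is an
# assumption.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = GPT2hlcLMHeadModel.from_pretrained("gpt2")
#   enc = tokenizer("a short example sentence", return_tensors="pt")
#   out = model(input_ids=enc["input_ids"], labels=enc["input_ids"])
#   out.loss.backward()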
| 4,241 | 37.563636 | 118 | py |
HaRT | HaRT-main/src/model/hart.py | import torch
import torch.nn as nn
from src.model.modeling_hart import HaRTBasePreTrainedModel, HaRTBaseLMHeadModel
from src.modeling_outputs import HaRTOutput
from transformers.activations import ACT2FN
""" HaRT model pre-trained for the HuLM task """,
class HistoryMLP(nn.Module):
def __init__(self, n_state, config): # in history MLP: n_state=200
super().__init__()
nx = config.n_embd
self.config = config
self.l_hist = nn.Linear(nx, nx)
self.l_hs = nn.Linear(nx, nx)
self.act = ACT2FN["tanh"]
# self.apply(self._init_weights)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
def forward(self, history, hidden_state, sequence_mask):
h1 = self.l_hist(history)
h2 = self.l_hs(hidden_state)
# Fixing the bug where sequence length is -1 when all tokens are padded (i.e. attn_mask is all zeros)
h2 = h2 * sequence_mask
# expand along block_len dimension (1) to allow addition with history
h2 = h2.unsqueeze(1) # [batch_size, 1, embed_dim]
return self.act(h1 + h2) # [batch_size, block_size, embed_dim]
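# Shape sketch of the recurrence above (dimension names are illustrative):
#   history:      [batch_size, block_size, n_embd] -> l_hist -> h1
#   hidden_state: [batch_size, n_embd] (last non-pad token) -> l_hs -> h2
#   sequence_mask zeroes h2 for users whose current block is fully padded,
#   h2.unsqueeze(1) broadcasts h2 over the block dimension, and the updated
#   history tanh(h1 + h2) keeps shape [batch_size, block_size, n_embd].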
class HaRTPreTrainedModel(HaRTBasePreTrainedModel):
def __init__(self, config, hartbaseLMmodel=None):
super().__init__(config)
self.config = config
inner_dim = config.n_inner if config.n_inner is not None else 200
if hartbaseLMmodel:
self.transformer = hartbaseLMmodel
else:
self.transformer = HaRTBaseLMHeadModel(config)
if config.add_history:
self.history_mlp = HistoryMLP(inner_dim, config)
# Model parallel
self.model_parallel = False
self.device_map = None
def get_last_pred_token_hidden_state(self, hs, attn_mask):
batch_size = attn_mask.shape[0]
# finds the last token that is not a padding token in each row.
sequence_lengths = torch.ne(attn_mask, 0).sum(-1) - 1 # [batch_size]
# selects the indices in sequence_lengths for the respective row in batch, i.e,
# finds the embedding of the last non-padded token (indices from sequence_lengths) in each row
last_pred_token_hs = hs[range(batch_size), sequence_lengths] # [batch_size, embed_dim]
# Fixing the bug where sequence length is -1 when all tokens are padded (i.e. attn_mask is all zeros)
sequence_mask = (sequence_lengths != -1).int()
sequence_mask = sequence_mask.unsqueeze(1)
return last_pred_token_hs, sequence_mask
def forward(
self,
input_ids=None,
history=None,
layer_ins=None,
extract_layer=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
output_block_last_hidden_states=None,
output_block_extract_layer_hs=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
layer_ins = layer_ins if layer_ins else self.config.layer_ins
extract_layer = extract_layer if extract_layer else self.config.extract_layer
usr_seq_len, blocks_len, block_size = input_ids.shape
batch_loss = torch.tensor(0.0).to(self.device)
batch_len = 0
all_blocks_last_hs = () if output_block_last_hidden_states else None
all_blocks_history = ()
all_blocks_attn_mask = ()
all_blocks_extract_layer_hs = ()
for i in range(blocks_len):
block_input_ids = input_ids[:,i,:]
block_attention_mask = attention_mask[:,i,:]
block_labels = labels[:,i,:] if labels is not None else None
arhulm_output = self.transformer(
input_ids=block_input_ids,
history=history,
layer_ins=layer_ins,
extract_layer=extract_layer,
past_key_values=past_key_values,
attention_mask=block_attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
labels=block_labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_block_last_hs = arhulm_output.last_hidden_state
if output_block_last_hidden_states:
all_blocks_last_hs = all_blocks_last_hs + (arhulm_output.last_hidden_state,)
extract_layer_hs = arhulm_output["extract_layer_hidden_states"][0] if isinstance(arhulm_output, dict) else arhulm_output[-1][0]
if output_block_extract_layer_hs:
all_blocks_extract_layer_hs = all_blocks_extract_layer_hs + (extract_layer_hs, )
if history is not None:
hs, sequence_mask = self.get_last_pred_token_hidden_state(extract_layer_hs, block_attention_mask)
history = self.history_mlp(history, hs, sequence_mask)
all_blocks_history = all_blocks_history + (history[:, 0, :],)
all_blocks_attn_mask = all_blocks_attn_mask + (sequence_mask, )
if labels is not None:
batch_loss += arhulm_output["loss"] if isinstance(arhulm_output, dict) else arhulm_output[0]
batch_len += len(block_labels[block_labels!= -100])
loss = batch_loss/batch_len if labels is not None else None
last_updated_history = history
history_output = (all_blocks_history, all_blocks_attn_mask)
if not return_dict:
output = (last_block_last_hs, last_block_last_hs,) + arhulm_output[3:]
return ((loss,) + output) if loss is not None else output
return HaRTOutput(
loss=loss,
last_hidden_state=last_block_last_hs,
all_blocks_last_hidden_states = all_blocks_last_hs,
all_blocks_extract_layer_hs = all_blocks_extract_layer_hs,
history=history_output,
past_key_values=arhulm_output.past_key_values,
hidden_states=arhulm_output.hidden_states,
attentions=arhulm_output.attentions,
cross_attentions=arhulm_output.cross_attentions,
)
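# Illustrative input layout for the block-wise forward pass above; shapes are
# assumptions consistent with `usr_seq_len, blocks_len, block_size` and the
# convention of ignoring -100 labels.
#
#   input_ids:      [n_users, n_blocks, block_size]   temporally ordered blocks
#   attention_mask: [n_users, n_blocks, block_size]
#   labels:         [n_users, n_blocks, block_size]   (-100 on padded positions)
#   history:        [n_users, block_size, n_embd]     initial user state
#
#   out = model(input_ids=input_ids, attention_mask=attention_mask,
#               labels=labels, history=history)
#   out.loss      # summed token losses averaged over all labeled tokens
#   out.history   # per-block user states and their validity masks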
| 7,289 | 41.138728 | 140 | py |
HaRT | HaRT-main/src/model/finetune_hart.py | import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
from transformers.utils import logging
from .modeling_hart import HaRTBasePreTrainedModel
from .hart import HaRTPreTrainedModel
logger = logging.get_logger(__name__)
class HaRTForSequenceClassification(HaRTBasePreTrainedModel):
# _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config, model_name_or_path=None, pt_model=None):
super().__init__(config)
self.freeze_model = config.freeze_model
self.num_labels = config.num_labels
self.finetuning_task = config.finetuning_task
self.use_history_output = config.use_history_output
self.use_hart_no_hist = config.use_hart_no_hist
if model_name_or_path:
self.transformer = HaRTPreTrainedModel.from_pretrained(model_name_or_path)
elif pt_model:
self.transformer = pt_model
else:
self.transformer = HaRTPreTrainedModel(config)
self.init_weights()
if not self.freeze_model and not self.finetuning_task=='ope' and not self.finetuning_task=='user':
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
if self.finetuning_task=='age':
self.transform = nn.Linear(config.n_embd, config.n_embd)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
# Model parallel
self.model_parallel = False
self.device_map = None
def get_pooled_logits(self, logits, input_ids, inputs_embeds):
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
assert (
self.config.pad_token_id is not None or batch_size == 1
), "Cannot handle batch sizes > 1 if no padding token is defined."
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
# since we want the index of the last predicted token of the last block only.
sequence_lengths = sequence_lengths[:, -1]
else:
sequence_lengths = -1
                logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
# get the logits corresponding to the indices of the last pred tokens (of the last blocks) of each user
pooled_logits = logits[range(batch_size), sequence_lengths]
return pooled_logits
def forward(
self,
input_ids=None,
user_ids=None,
history=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
history=history,
output_block_last_hidden_states=True,
output_block_extract_layer_hs=True,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
all_blocks_last_hidden_states = transformer_outputs.all_blocks_extract_layer_hs if self.freeze_model else transformer_outputs.all_blocks_last_hidden_states
if self.finetuning_task=='user' or self.finetuning_task=='ope' or self.finetuning_task=='age':
if self.use_history_output:
states = transformer_outputs.history[0]
masks = transformer_outputs.history[1]
multiplied = tuple(l * r for l, r in zip(states, masks))
all_blocks_user_states = torch.stack(multiplied, dim=1)
all_blocks_masks = torch.stack(masks, dim=1)
sum = torch.sum(all_blocks_user_states, dim=1)
divisor = torch.sum(all_blocks_masks, dim=1)
hidden_states = sum/divisor
else:
raise ValueError("Since you don't want to use the user-states/history output for a user-level task, please customize the code as per your requirements.")
else:
hidden_states = torch.stack(all_blocks_last_hidden_states, dim=1)
if self.use_hart_no_hist:
logits = self.score(all_blocks_last_hidden_states[0]) if self.freeze_model else self.score(self.ln_f(all_blocks_last_hidden_states[0]))
batch_size, _, sequence_length = input_ids.shape
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
pooled_logits = logits[range(batch_size), sequence_lengths.squeeze()]
else:
if self.finetuning_task=='ope' or self.finetuning_task=='user' or self.freeze_model:
logits = self.score(hidden_states)
elif self.finetuning_task=='age':
logits = self.score(self.transform(self.ln_f(hidden_states)))
else:
logits = self.score(self.ln_f(hidden_states))
pooled_logits = logits if (user_ids is None or self.use_history_output) else \
self.get_pooled_logits(logits, input_ids, inputs_embeds)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))
else:
labels = labels.long()
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
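# Sketch of the user-level pooling performed above for tasks that set
# `use_history_output=True`; variable names are illustrative.
#
#   states, masks = transformer_outputs.history       # tuples over blocks
#   user_state = sum(s * m for s, m in zip(states, masks)) / sum(masks)
#   logits = score(user_state)                         # [n_users, num_labels]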
| 7,350 | 43.283133 | 169 | py |
HaRT | HaRT-main/src/model/configuration_hart.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT-2 configuration """
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"gpt2": "https://huggingface.co/gpt2/resolve/main/config.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/config.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/config.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/config.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/config.json",
}
class HaRTConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a :class:`~transformers.GPT2Model` or a
:class:`~transformers.TFGPT2Model`. It is used to instantiate a GPT-2 model according to the specified arguments,
defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration
to that of the GPT-2 `small <https://huggingface.co/gpt2>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 50257):
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.GPT2Model` or
:class:`~transformers.TFGPT2Model`.
add_history (:obj:`bool`, `optional`, defaults to :obj:`False`):
Argument used when using recurrent history, used in the models :class:`~ArHuLM`
Whether or not to use user histoy.
layer_ins (:obj:`int`, `optional`, defaults to 2):
At which layer to insert history if add_history=True.
extract_layer (:obj:`int`, `optional`, defaults to 11):
This layer's hidden states are used in history recurrence computation.
n_positions (:obj:`int`, `optional`, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_ctx (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the causal mask (usually same as n_positions).
n_embd (:obj:`int`, `optional`, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (:obj:`int`, `optional`, defaults to None):
Dimensionality of the inner feed-forward layers. :obj:`None` will set it to 4 times n_embd
activation_function (:obj:`str`, `optional`, defaults to :obj:`"gelu"`):
Activation function, to be selected in the list :obj:`["relu", "silu", "gelu", "tanh", "gelu_new"]`.
resid_pdrop (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (:obj:`int`, `optional`, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5):
The epsilon to use in the layer normalization layers
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
summary_type (:obj:`string`, `optional`, defaults to :obj:`"cls_index"`):
Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
and :class:`~transformers.TFGPT2DoubleHeadsModel`.
Has to be one of the following options:
- :obj:`"last"`: Take the last token hidden state (like XLNet).
- :obj:`"first"`: Take the first token hidden state (like BERT).
- :obj:`"mean"`: Take the mean of all tokens hidden states.
- :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- :obj:`"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`):
Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
and :class:`~transformers.TFGPT2DoubleHeadsModel`.
Whether or not to add a projection after the vector extraction.
summary_activation (:obj:`str`, `optional`):
Argument used when doing sequence summary. Used in for the multiple choice head in
:class:`~transformers.GPT2DoubleHeadsModel`.
Pass :obj:`"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (:obj:`bool`, `optional`, defaults to :obj:`True`):
Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
and :class:`~transformers.TFGPT2DoubleHeadsModel`.
Whether the projection outputs should have :obj:`config.num_labels` or :obj:`config.hidden_size` classes.
summary_first_dropout (:obj:`float`, `optional`, defaults to 0.1):
Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
and :class:`~transformers.TFGPT2DoubleHeadsModel`.
The dropout ratio to be used after the projection and activation.
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Example::
>>> from transformers import GPT2Model, GPT2Config
>>> # Initializing a GPT2 configuration
>>> configuration = GPT2Config()
>>> # Initializing a model from the configuration
>>> model = GPT2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
#TODO: add doc for add_history, layer_ins, and extract_layer
model_type = "gpt2"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=50257,
add_history=False,
layer_ins=2,
extract_layer=11,
freeze_model=False,
use_hart_no_hist=False,
n_positions=1024,
n_ctx=1024,
n_embd=768,
n_layer=12,
n_head=12,
n_inner=None,
activation_function="gelu_new",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
summary_type="cls_index",
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
gradient_checkpointing=False,
use_cache=True,
bos_token_id=50256,
eos_token_id=50256,
**kwargs
):
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.add_history = add_history
self.layer_ins = layer_ins
self.extract_layer = extract_layer
self.freeze_model = freeze_model
self.use_hart_no_hist = use_hart_no_hist
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
self.gradient_checkpointing = gradient_checkpointing
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
@property
def max_position_embeddings(self):
return self.n_positions
@property
def hidden_size(self):
return self.n_embd
@property
def num_attention_heads(self):
return self.n_head
@property
def num_hidden_layers(self):
return self.n_layer
| 9,982 | 46.312796 | 119 | py |
HaRT | HaRT-main/src/model/modeling_hart.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch HaRT model."""
import os
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.modeling_utils import (
Conv1D,
PreTrainedModel,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from src.model.configuration_hart import HaRTConfig
from src.modeling_outputs import (
HaRTBaseModelOutput,
HaRTBaseCausalLMOutput,
)
logger = logging.get_logger(__name__)
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
"""Load tf checkpoints in a pytorch model"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False, use_history=False, is_cross_attention=False):
super().__init__()
self.config = config
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer(
"bias", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.is_cross_attention = is_cross_attention
self.use_history = use_history
if self.is_cross_attention:
self.c_attn = Conv1D(2 * n_state, nx)
self.q_attn = Conv1D(n_state, nx)
elif self.use_history:
if self.config.use_qh05_wts:
self.c_attn = Conv1D(3 * n_state, nx)
self.qh_attn = Conv1D(n_state, nx * 2)
else:
self.c_attn = Conv1D(3 * n_state, nx)
self.qh_attn = Conv1D(n_state, nx * 2)
self.kv_attn = Conv1D(2 * n_state, nx)
else:
self.c_attn = Conv1D(3 * n_state, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / (float(v.size(-1)) ** 0.5)
nd, ns = w.size(-2), w.size(-1)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
mask = self.bias[:, :, ns - nd : ns, :ns]
w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = (torch.matmul(w, v),)
if output_attentions:
outputs += (w,)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(
self,
hidden_states,
history=None,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
if encoder_hidden_states is not None:
assert hasattr(
self, "q_attn"
), "If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`."
query = self.q_attn(hidden_states)
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
attention_mask = encoder_attention_mask
elif history is not None:
if self.config.use_qh05_wts:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self.qh_attn(torch.cat([query, history], dim=-1))
else:
key, value = self.kv_attn(hidden_states).split(self.split_size, dim=2)
query = self.qh_attn(torch.cat([hidden_states, history], dim=-1))
else:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key.transpose(-2, -1), value) # transpose to have same shapes
else:
present = None
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return (a, present) + attn_outputs[1:] # a, present, (attentions)
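# Query/key/value wiring of the history-aware path above (illustrative summary):
#   without history:  q, k, v = c_attn(hidden_states).split(...)
#   with history:     k, v    = kv_attn(hidden_states).split(...)
#                     q       = qh_attn(concat(hidden_states, history))
#   (with use_qh05_wts, q first comes from c_attn and is then mixed with the
#    history through qh_attn); the causal mask and softmax are unchanged.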
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
hidden_size = config.n_embd
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_history:
self.attn = Attention(hidden_size, n_ctx, config, scale, use_history=True)
else:
self.attn = Attention(hidden_size, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = MLP(inner_dim, config)
def forward(
self,
hidden_states,
history=None,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
attn_outputs = self.attn(
self.ln_1(hidden_states),
history=history,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + hidden_states
if encoder_hidden_states is not None:
# add one self-attention block for cross-attention
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
cross_attn_outputs = self.crossattention(
self.ln_cross_attn(hidden_states),
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attn_output = cross_attn_outputs[0]
# residual connection
hidden_states = hidden_states + attn_output
outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))
# residual connection
hidden_states = hidden_states + feed_forward_hidden_states
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs # hidden_states, present, (attentions, cross_attentions)
class HaRTBasePreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = HaRTConfig
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
is_parallelizable = True
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
if self.config.use_qh05_wts:
module.weight.data.normal_(mean=0.5, std=self.config.initializer_range)
else:
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
class HaRTBaseModel(HaRTBasePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.h))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
self.wte = self.wte.to(self.first_device)
self.wpe = self.wpe.to(self.first_device)
# Load onto devices
for k, v in self.device_map.items():
for block in v:
cuda_device = "cuda:" + str(k)
self.h[block] = self.h[block].to(cuda_device)
# ln_f to last
self.ln_f = self.ln_f.to(self.last_device)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
self.wte = self.wte.to("cpu")
self.wpe = self.wpe.to("cpu")
for index in range(len(self.h)):
self.h[index] = self.h[index].to("cpu")
self.ln_f = self.ln_f.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
def forward(
self,
input_ids=None,
history=None,
layer_ins=None,
extract_layer=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
assert batch_size > 0, "batch_size has to be defined and > 0"
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
extract_layer_hs = () if extract_layer else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if extract_layer is not None and i==extract_layer:
extract_layer_hs = extract_layer_hs + (hidden_states,)
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure layer_past is on same device as hidden_states (might not be correct)
if layer_past is not None:
layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
history if layer_ins is not None and i==layer_ins-1 else None,
None,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
outputs = block(
hidden_states,
history=history if layer_ins is not None and i==layer_ins-1 else None,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# Add last hidden state if extract layer is last layer
if extract_layer is not None and i==extract_layer-1:
extract_layer_hs = extract_layer_hs + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions, extract_layer_hs] if v is not None)
return HaRTBaseModelOutput(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
extract_layer_hidden_states=extract_layer_hs,
)
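# Note on the two layer indices used above: `layer_ins` picks the block whose
# attention receives the user history (history is passed only when
# i == layer_ins - 1), while `extract_layer` selects the hidden states returned
# in `extract_layer_hidden_states` for the history recurrence. An illustrative
# call (argument values are assumptions):
#
#   out = model(input_ids, history=history, layer_ins=2, extract_layer=11)
#   out.extract_layer_hidden_states[0]   # [batch_size, seq_len, n_embd]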
"""
The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
"""
class HaRTBaseLMHeadModel(HaRTBasePreTrainedModel):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = HaRTBaseModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.transformer.h))
self.transformer.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.transformer.first_device)
self.model_parallel = True
def deparallelize(self):
self.transformer.deparallelize()
self.transformer = self.transformer.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
torch.cuda.empty_cache()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
def forward(
self,
input_ids=None,
history=None,
layer_ins=None,
extract_layer=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
history=history,
layer_ins=layer_ins,
extract_layer=extract_layer,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(reduction='sum')
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return HaRTBaseCausalLMOutput(
loss=loss,
logits=lm_logits,
last_hidden_state=transformer_outputs.last_hidden_state,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
extract_layer_hidden_states=transformer_outputs.extract_layer_hidden_states,
        )
| 32,108 | 40.538163 | 180 | py |
HaRT | HaRT-main/optuna_trials/run_hulm_hart_trials.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Running Optuna trials for fine-tuning HaRT for human language modeling.
"""
import logging
import copy
import json
import math
import os
import sys
sys.path.insert(1, '/home/nisoni/HaRT/HaRT')
from typing import Dict
from args.clm_args import DataTrainingArguments, ModelArguments
from src.model.hart import HaRTPreTrainedModel
from src.model.modeling_hart import HaRTBaseLMHeadModel
from src.model.configuration_hart import HaRTConfig
from data.utils_optuna_trials.hulm_sample_data_utils import load_dataset
from data.data_collator import DataCollatorWithPaddingForHaRT
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
TrainerCallback,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
class evalLogsCallback(TrainerCallback):
def __init__(self):
super().__init__()
self.metrics = {}
def on_evaluate(self, args, state, control, **kwargs):
metrics = kwargs['metrics']
if control.should_save:
metrics["perplexity"] = math.exp(metrics["eval_loss"])
metrics['trial_params'] = json.dumps(state.trial_params)
self.metrics = metrics.copy()
logger.info(json.dumps(metrics))
def on_save(self, args, state, control, **kwargs):
output_dir = state.best_model_checkpoint.split('/checkpoint')[0]
self.save_metrics('eval_{}'.format(self.metrics['epoch']), self.metrics, output_dir)
logger.info("Saving eval metrics after epoch {} into {}".format(self.metrics['epoch'], output_dir))
def on_train_end(self, args, state, control, **kwargs):
output_dir = state.best_model_checkpoint.split('/checkpoint')[0]
metrics = state.trial_params.copy()
metrics["number_of_gpus"] = args.n_gpu
metrics["best_loss"] = state.best_metric
metrics["best_perplexity"] = math.exp(state.best_metric)
metrics["best_model_checkpoint"] = state.best_model_checkpoint
self.metrics = metrics
self.save_metrics('final', self.metrics, output_dir)
def save_metrics(self, split, metrics, output_dir, combined=True):
path = os.path.join(output_dir, f"{split}_results.json")
with open(path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.instantiate_hart:
config = HaRTConfig()
elif model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.add_history:
config.add_history = True
if model_args.use_qh05_wts:
config.use_qh05_wts = True
else:
config.use_qh05_wts = False
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
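    # Note added for clarity (not in the original script): HaRT models a user's
    # posts as one temporally ordered sequence, and the extra `<|insep|>` special
    # token registered below is used as the separator between posts.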
def add_insep_token(tokenizer):
special_tokens_dict = {'sep_token': str('<|insep|>')}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
assert num_added_toks == 1
assert tokenizer.sep_token == '<|insep|>'
add_insep_token(tokenizer)
hartbaseLMmodel = HaRTBaseLMHeadModel.from_pretrained(model_args.model_name_or_path, config=config)
hartbaseLMmodel.resize_token_embeddings(len(tokenizer))
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
if data_args.train_table is not None or data_args.dev_table is not None or data_args.test_table is not None:
if data_args.train_table is not None:
train_dataset, train_uncut_blocks = load_dataset(logger, tokenizer, data_args.train_table, block_size, data_args.max_train_blocks, data_args, 'train', data_args.disable_hulm_batching)
if data_args.dev_table is not None:
eval_dataset, eval_uncut_blocks = load_dataset(logger, tokenizer, data_args.dev_table, block_size, data_args.max_val_blocks, data_args, 'dev', data_args.disable_hulm_batching)
elif data_args.test_table is not None:
eval_dataset, eval_uncut_blocks = load_dataset(logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', data_args.disable_hulm_batching)
else:
raise ValueError("This CLM runner requires mysql database tables as train/dev/test data sources currently!")
# Data collator
if model_args.instantiate_hart:
# This one will take care of collating batches of type [users, windows, num_tokens]
data_collator = DataCollatorWithPaddingForHaRT(model_args, config, tokenizer, training_args.deepspeed)
else:
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator = default_data_collator
if model_args.search_params:
def model_init():
# Set seed before initializing model.
set_seed(training_args.seed)
model = HaRTPreTrainedModel(config, hartbaseLMmodel)
model.transformer.resize_token_embeddings(len(tokenizer))
model.resize_token_embeddings(len(tokenizer))
return model
####### NOTE: Ray Hyperparameter search is not extensively tested in this project!! #########
def ray_hp_space(trial):
from ray import tune
return {
"learning_rate": tune.loguniform(1e-6, 1e-4),
"seed": tune.uniform(1, 50),
}
def optuna_hp_space(trial):
return {
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 5e-2, log=True),
# "weight_decay": trial.suggest_float("weight_decay", 0, 1, log=True),
}
def compute_objective(metrics: Dict[str, float]) -> float:
"""
            The objective to minimize: the evaluation loss.
Args:
metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
:obj:`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
return loss
trainer = Trainer(
model_init=model_init,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=tokenizer,
data_collator=data_collator,
callbacks=[evalLogsCallback]
)
backend = 'ray' if model_args.use_ray else 'optuna' if model_args.use_optuna else None
hp_space = ray_hp_space if model_args.use_ray else optuna_hp_space if model_args.use_optuna else None
best_trial = trainer.hyperparameter_search(
backend=backend,
hp_space=hp_space,
n_trials=model_args.num_trials,
compute_objective=compute_objective)
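        # Clarifying note (added): with the optuna backend, `best_trial` is a
        # `transformers.trainer_utils.BestRun` namedtuple of
        # (run_id, objective, hyperparameters), so `best_trial[1]` used below is the
        # best eval loss found across the trials.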
else:
raise ValueError("This runner is only for hyperparams search trials!")
def log_and_save_metrics():
metrics = {}
metrics["best_trial_details"] = json.dumps(best_trial) # run_id, loss, hyperparams
metrics["best_trial_perplexity"] = math.exp(best_trial[1])
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
metrics["train_blocks_per_sample"] = train_uncut_blocks if data_args.max_train_blocks is None else min(data_args.max_train_blocks, train_uncut_blocks)
metrics["block_size"] = block_size
metrics["gpus"] = training_args.n_gpu
metrics["total_epochs"] = training_args.num_train_epochs
metrics["per_device_train_batch_size"] = training_args.per_device_train_batch_size
metrics["train_table"] = data_args.train_table
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
metrics["eval_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_eval_batch_size"] = training_args.per_device_eval_batch_size
metrics["is_dev"] = True if data_args.dev_table else False
metrics["eval_table"] = data_args.dev_table if data_args.dev_table else data_args.test_table
if model_args.instantiate_hart:
metrics["history"] = model_args.add_history
metrics["extract_layer"] = model_args.extract_layer if model_args.extract_layer else config.extract_layer
metrics["layer_ins"] = model_args.layer_ins if model_args.layer_ins else config.layer_ins
if model_args.add_history:
metrics["0s_initial_history"] = False if model_args.initial_history else True
trainer.log_metrics("trial", metrics)
trainer.save_metrics("trial", metrics, combined=False)
log_and_save_metrics()
if training_args.do_predict:
logger.info("*** Evaluate Test set ***")
eval_dataset, eval_uncut_blocks = load_dataset(logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', data_args.disable_hulm_batching)
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
metrics["eval_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_eval_batch_size"] = training_args.per_device_eval_batch_size
metrics["eval_table"] = data_args.test_table
trainer.log_metrics("test", metrics)
trainer.save_metrics("test", metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 15,867 | 44.994203 | 195 | py |
HaRT | HaRT-main/optuna_trials/run_ft_gpt2hlc_trials.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Running Optuna trials for fine-tuning GPT-2HLC for sequence classification."""
import logging
import copy
import json
import os
import sys
from typing import Optional, Dict, Callable, Union
import numpy as np
import torch.nn as nn
import transformers
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainerCallback,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.integrations import ( # isort: split
default_hp_search_backend,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
from transformers.trainer_utils import BestRun, PREFIX_CHECKPOINT_DIR, default_compute_objective, default_hp_space, get_last_checkpoint, is_main_process, IntervalStrategy, HPSearchBackend
from args.ft_args import DataTrainingArguments, ModelArguments
from src.model_gpt2hlc.finetune_gpt2hlc import GPT2hlcForSequenceClassification
from data.utils_hart.ft_doc_disable_hulm_batching_data_utils import load_dataset as load_no_hulm_dataset
from data.utils_gpt2hlc.ft_user_data_utils_gpt2hlc import load_dataset as load_user_dataset
logger = logging.getLogger(__name__)
class CustomTrainer(Trainer):
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
run_hp_search: Optional[Callable[["optuna.Trial"], BestRun]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize greater or lower objectives. Can be :obj:`"minimize"` or :obj:`"maximize"`; you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search if run_hp_search is not None else run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
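# Illustrative usage sketch (added for clarity; not part of the original script).
# CustomTrainer only changes hyperparameter_search() by accepting a caller-supplied
# `run_hp_search` hook; a trial run then looks roughly like:
#
#     trainer = CustomTrainer(model_init=model_init, args=training_args, ...)
#     best_run = trainer.hyperparameter_search(
#         backend="optuna",
#         hp_space=lambda trial: {
#             "learning_rate": trial.suggest_float("learning_rate", 5e-6, 5e-4, log=True)
#         },
#         run_hp_search=run_hp_search,               # custom Optuna loop defined in main()
#         compute_objective=lambda m: m["eval_f1"],  # hypothetical objective key
#         n_trials=10,
#         direction="maximize",
#     )
#     # best_run.run_id, best_run.objective, best_run.hyperparameters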
class EvalLogsCallback(TrainerCallback):
def __init__(self):
super().__init__()
self.metrics = {}
def on_evaluate(self, args, state, control, **kwargs):
metrics = kwargs['metrics']
if control.should_save:
metrics['trial_params'] = json.dumps(state.trial_params)
self.metrics = metrics.copy()
print(json.dumps(metrics))
def on_save(self, args, state, control, **kwargs):
output_dir = state.best_model_checkpoint.split('/checkpoint')[0]
self.save_metrics('eval_{}'.format(self.metrics['epoch']), self.metrics, output_dir)
print("Saving eval metrics after epoch {} into {}".format(self.metrics['epoch'], output_dir))
def on_train_end(self, args, state, control, **kwargs):
output_dir = state.best_model_checkpoint.split('/checkpoint')[0]
metrics = state.trial_params.copy()
metrics["number_of_gpus"] = args.n_gpu
metrics["best_metric"] = state.best_metric
metrics["best_model_checkpoint"] = state.best_model_checkpoint
self.metrics = metrics
self.save_metrics('final', self.metrics, output_dir)
def save_metrics(self, split, metrics, output_dir, combined=True):
path = os.path.join(output_dir, f"{split}_results.json")
with open(path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
class EarlyStoppingCallback(TrainerCallback):
"""
A :class:`~transformers.TrainerCallback` that handles early stopping.
Args:
early_stopping_patience (:obj:`int`):
Use with :obj:`metric_for_best_model` to stop training when the specified metric worsens for
:obj:`early_stopping_patience` evaluation calls.
early_stopping_threshold(:obj:`float`, `optional`):
Use with TrainingArguments :obj:`metric_for_best_model` and :obj:`early_stopping_patience` to denote how
            much the specified metric must improve to satisfy early stopping conditions.
This callback depends on :class:`~transformers.TrainingArguments` argument `load_best_model_at_end` functionality
to set best_metric in :class:`~transformers.TrainerState`.
"""
def __init__(self, metric_for_early_stopping, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
self.metric_for_early_stopping = metric_for_early_stopping
self.prev_metric_value = None
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def check_metric_value(self, args, state, control, metric_value):
#TODO: use args.greater_is_better which is w.r.t. early stopping metric
# operator = np.greater if args.greater_is_better else np.less
operator = np.less
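        # Added note: `np.less` hard-codes the assumption that the early-stopping
        # metric improves by decreasing (e.g. an eval loss); the patience counter is
        # reset on the first evaluation or whenever the metric drops by at least
        # `early_stopping_threshold`, and incremented otherwise.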
if self.prev_metric_value is None or (
operator(metric_value, self.prev_metric_value)
and abs(metric_value - self.prev_metric_value) >= self.early_stopping_threshold
):
self.early_stopping_patience_counter = 0
else:
self.early_stopping_patience_counter += 1
self.prev_metric_value = metric_value
def on_train_begin(self, args, state, control, **kwargs):
assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
assert (
self.metric_for_early_stopping is not None
), "EarlyStoppingCallback requires metric_for_early_stopping to be defined"
assert (
args.evaluation_strategy != IntervalStrategy.NO
), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"
def on_evaluate(self, args, state, control, metrics, **kwargs):
metric_to_check = self.metric_for_early_stopping
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics.get(metric_to_check)
if metric_value is None:
logger.warning(
f"early stopping required metric_for_early_stopping, but did not find {metric_to_check} so early stopping is disabled"
)
return
self.check_metric_value(args, state, control, metric_value)
if self.early_stopping_patience_counter >= self.early_stopping_patience:
control.should_training_stop = True
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
# model initialization seed maybe different from training seed -- to get a stable averaged result.
set_seed(model_args.init_seed)
# Labels
# It can be a classification or a regression task
num_labels = data_args.num_labels
if num_labels > 1:
is_regression = False # classification task
else:
is_regression = True # regression task
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name if data_args.task_name is not None else data_args.task_type,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer.pad_token = tokenizer.eos_token
config.pad_token_id = tokenizer.pad_token_id
config.freeze_model = model_args.freeze_model
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Preprocessed and padded datasets with labels
def load_dataset(args):
if data_args.task_type=='document':
return load_no_hulm_dataset(*args)
elif data_args.task_type=='user':
return load_user_dataset(*args)
if data_args.train_table is not None or data_args.dev_table is not None or data_args.test_table is not None:
if data_args.train_table is not None:
args = [logger, tokenizer, data_args.train_table, block_size, data_args.max_train_blocks, data_args, 'train', True]
train_dataset, train_uncut_blocks = load_dataset(args)
if data_args.dev_table is not None:
args = [logger, tokenizer, data_args.dev_table, block_size, data_args.max_val_blocks, data_args, 'dev', True]
eval_dataset, eval_uncut_blocks = load_dataset(args)
elif data_args.test_table is not None:
args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', True]
eval_dataset, eval_uncut_blocks = load_dataset(args)
else:
raise ValueError("This FT runner requires mysql database tables as train/dev/test data sources currently!")
def compute_metrics(p: EvalPrediction):
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import scipy
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=-1)
if is_regression:
mse = ((preds - p.label_ids) ** 2).mean().item()
r_pear, p_value = scipy.stats.pearsonr(preds, p.label_ids)
# from https://www.aclweb.org/anthology/W18-0604.pdf
r_meas1 = 0.77
r_meas2 = 0.70
r_dis = r_pear/((r_meas1*r_meas2)**0.5)
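            # Added note: r_dis is the disattenuated correlation, i.e. the observed
            # Pearson r corrected for the assumed reliabilities r_meas1 and r_meas2
            # of the two measures: r_dis = r_pear / sqrt(r_meas1 * r_meas2).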
return {
'mse': mse,
'r_dis': r_dis,
'r_pear': r_pear,
'p_value': p_value
}
else:
indices = p.label_ids!=-100 # make sure to ignore the labels marked as -100
labels = p.label_ids[indices]
preds = preds[indices]
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall
}
# Data collator
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator = default_data_collator
if model_args.search_params:
def model_init():
set_seed(model_args.init_seed)
model = GPT2hlcForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
set_seed(training_args.seed)
def freeze_params(model: nn.Module):
for par in model.parameters():
par.requires_grad = False
if model_args.freeze_model:
freeze_params(model.transformer)
if data_args.task_type=='user':
modules = [
model.transformer.wte,
model.transformer.wpe,
model.transformer.drop,
model.transformer.h[:10],
model.transformer.ln_f
]
for x in modules:
freeze_params(x)
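                # Added note: assuming the GPT-2 small backbone (12 blocks), freezing
                # wte/wpe/drop, blocks 0-9 and ln_f leaves only the last two blocks
                # and the classification head trainable for the user-level task.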
return model
####### NOTE: Ray Hyperparameter search is not extensively tested in this project!! #########
def ray_hp_space(trial):
from ray import tune
return {
"learning_rate": tune.loguniform(5e-6, 5e-4),
}
def stance_optuna_hp_space(trial):
return {
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 5e-4, log=True),
# "weight_decay": trial.suggest_float("weight_decay", 0.0, 1.0, log=False),
}
def sent_optuna_hp_space(trial):
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-7, 1e-5, log=True),
# "weight_decay": trial.suggest_float("weight_decay", 0.0, 1.0, log=False),
}
def doc_optuna_hp_space(trial):
return {
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 5e-4, log=True),
# "weight_decay": trial.suggest_float("weight_decay", 0.0, 1.0, log=False),
}
def compute_objective(metrics: Dict[str, float]) -> float:
"""
The objective to minimize/maximize.
Args:
metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
:obj:`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
f1 = metrics.pop(training_args.metric_for_best_model, None)
return f1
def run_hp_search(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
import optuna
def _objective(trial, checkpoint_dir=None):
checkpoint = None
if checkpoint_dir:
for subdir in os.listdir(checkpoint_dir):
if subdir.startswith(PREFIX_CHECKPOINT_DIR):
checkpoint = os.path.join(checkpoint_dir, subdir)
trainer.objective = None
trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
# If there hasn't been any evaluation during the training loop.
if getattr(trainer, "objective", None) is None:
metrics = trainer.evaluate()
trainer.objective = trainer.compute_objective(metrics)
output_dir = trainer.state.best_model_checkpoint.split('/checkpoint')[0]
# TODO: see if can get the best model from best model checkpoint instead of saving
# if yes, use HF trainer instead of CustomTrainer and remove run_hp_search code.
trainer.save_model(output_dir=output_dir)
return trainer.objective
timeout = kwargs.pop("timeout", None)
n_jobs = kwargs.pop("n_jobs", 1)
study = optuna.create_study(direction=direction, **kwargs)
study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs)
best_trial = study.best_trial
return BestRun(str(best_trial.number), best_trial.value, best_trial.params)
# Initialize our Trainer
trainer = CustomTrainer(
model_init=model_init,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
callbacks=[
EvalLogsCallback,
EarlyStoppingCallback(
model_args.metric_for_early_stopping,
model_args.early_stopping_patience,
model_args.early_stopping_threshold
)
]
)
backend = 'ray' if model_args.use_ray else 'optuna' if model_args.use_optuna else None
optuna_hp_space = stance_optuna_hp_space if data_args.task_name=='stance' else sent_optuna_hp_space if data_args.task_name=='sentiment' else doc_optuna_hp_space
hp_space = ray_hp_space if model_args.use_ray else optuna_hp_space if model_args.use_optuna else None
best_trial = trainer.hyperparameter_search(
backend=backend,
hp_space=hp_space,
run_hp_search=run_hp_search,
n_trials=model_args.num_trials,
compute_objective=compute_objective,
direction='maximize')
else:
raise ValueError("This runner is only for hyperparams search trials!")
def log_and_save_metrics():
metrics = {}
metrics['pretrained_model_loc'] = model_args.model_name_or_path
metrics["best_trial_details"] = json.dumps(best_trial) # run_id, f1, hyperparams
metrics["best_trial_f1"] = best_trial.objective
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
metrics["train_blocks_per_sample"] = train_uncut_blocks if data_args.max_train_blocks is None else min(data_args.max_train_blocks, train_uncut_blocks)
metrics["block_size"] = block_size
metrics["gpus"] = training_args.n_gpu
metrics["total_epochs"] = training_args.num_train_epochs
metrics["per_device_train_batch_size"] = training_args.per_device_train_batch_size
metrics["train_table"] = data_args.train_table
max_val_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
metrics["eval_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_eval_batch_size"] = training_args.per_device_eval_batch_size
metrics["is_dev"] = True if data_args.dev_table else False
metrics["eval_table"] = data_args.dev_table if data_args.dev_table else data_args.test_table
trainer.log_metrics("trial", metrics)
trainer.save_metrics("trial", metrics, combined=False)
log_and_save_metrics()
# Evaluation
if training_args.do_predict:
trainer.model = trainer.model.from_pretrained(training_args.output_dir + '/run-' + best_trial.run_id)
trainer.model = trainer.model.to(training_args.device)
trainer.save_model()
args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', True]
eval_dataset, eval_uncut_blocks = load_dataset(args)
logger.info("*** Evaluate all test set ***")
eval_test(best_trial, 'test', data_args, training_args, eval_dataset, eval_uncut_blocks, trainer)
def eval_test(best_trial, test_type, data_args, training_args, eval_dataset, eval_uncut_blocks, trainer):
metrics = trainer.evaluate(eval_dataset=eval_dataset)
metrics['best_trial_objective'] = best_trial.objective
metrics['best_trial_run_id'] = best_trial.run_id
metrics['best_trial_hyperparams'] = json.dumps(best_trial.hyperparameters)
max_eval_samples = (
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
)
metrics["test_samples"] = min(max_eval_samples, len(eval_dataset))
metrics["test_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_test_batch_size"] = training_args.per_device_eval_batch_size
metrics["test_table"] = data_args.test_table
trainer.log_metrics("eval_{}".format(test_type), metrics)
trainer.save_metrics("eval_{}".format(test_type), metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 28,278 | 46.688027 | 168 | py |
HaRT | HaRT-main/optuna_trials/run_ft_hart_trials.py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Runnig Optuna trials for fine-tuning HaRT for sequence classification."""
import logging
import copy
import json
import os
import sys
dirname = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(dirname,'..'))
from typing import Optional, Dict, Callable, Union
import numpy as np
import torch.nn as nn
import transformers
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainerCallback,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.integrations import ( # isort: split
default_hp_search_backend,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
from transformers.trainer_utils import BestRun, PREFIX_CHECKPOINT_DIR, default_compute_objective, default_hp_space, get_last_checkpoint, is_main_process, IntervalStrategy, HPSearchBackend
from args.ft_args import DataTrainingArguments, ModelArguments
from src.model.configuration_hart import HaRTConfig
from src.model.modeling_hart import HaRTBaseLMHeadModel
from src.model.hart import HaRTPreTrainedModel
from src.model.finetune_hart import HaRTForSequenceClassification
from data.utils_hart.ft_doc_disable_hulm_batching_data_utils import load_dataset as load_no_hulm_dataset
from data.utils_hart.ft_doc_data_utils import load_dataset as load_doc_dataset
from data.utils_hart.ft_user_data_utils import load_dataset as load_user_dataset
from data.data_collator import DataCollatorWithPaddingForHaRT
logger = logging.getLogger(__name__)
class CustomTrainer(Trainer):
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
run_hp_search: Optional[Callable[["optuna.Trial"], BestRun]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize greater or lower objectives. Can be :obj:`"minimize"` or :obj:`"maximize"`; you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search if run_hp_search is not None else run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
class EvalLogsCallback(TrainerCallback):
def __init__(self):
super().__init__()
self.metrics = {}
def on_evaluate(self, args, state, control, **kwargs):
metrics = kwargs['metrics']
if control.should_save:
metrics['trial_params'] = json.dumps(state.trial_params)
self.metrics = metrics.copy()
print(json.dumps(metrics))
def on_save(self, args, state, control, **kwargs):
output_dir = state.best_model_checkpoint.split('/checkpoint')[0]
self.save_metrics('eval_{}'.format(self.metrics['epoch']), self.metrics, output_dir)
print("Saving eval metrics after epoch {} into {}".format(self.metrics['epoch'], output_dir))
def on_train_end(self, args, state, control, **kwargs):
output_dir = state.best_model_checkpoint.split('/checkpoint')[0]
metrics = state.trial_params.copy()
metrics["number_of_gpus"] = args.n_gpu
metrics["best_metric"] = state.best_metric
metrics["best_model_checkpoint"] = state.best_model_checkpoint
self.metrics = metrics
self.save_metrics('final', self.metrics, output_dir)
def save_metrics(self, split, metrics, output_dir, combined=True):
path = os.path.join(output_dir, f"{split}_results.json")
with open(path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
class EarlyStoppingCallback(TrainerCallback):
"""
A :class:`~transformers.TrainerCallback` that handles early stopping.
Args:
early_stopping_patience (:obj:`int`):
Use with :obj:`metric_for_best_model` to stop training when the specified metric worsens for
:obj:`early_stopping_patience` evaluation calls.
early_stopping_threshold(:obj:`float`, `optional`):
Use with TrainingArguments :obj:`metric_for_best_model` and :obj:`early_stopping_patience` to denote how
            much the specified metric must improve to satisfy early stopping conditions.
This callback depends on :class:`~transformers.TrainingArguments` argument `load_best_model_at_end` functionality
to set best_metric in :class:`~transformers.TrainerState`.
"""
def __init__(self, metric_for_early_stopping, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
self.metric_for_early_stopping = metric_for_early_stopping
self.prev_metric_value = None
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def check_metric_value(self, args, state, control, metric_value):
#TODO: use args.greater_is_better which is w.r.t. early stopping metric
# operator = np.greater if args.greater_is_better else np.less
operator = np.less
if self.prev_metric_value is None or (
operator(metric_value, self.prev_metric_value)
and abs(metric_value - self.prev_metric_value) >= self.early_stopping_threshold
):
self.early_stopping_patience_counter = 0
else:
self.early_stopping_patience_counter += 1
self.prev_metric_value = metric_value
def on_train_begin(self, args, state, control, **kwargs):
assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
assert (
self.metric_for_early_stopping is not None
), "EarlyStoppingCallback requires metric_for_early_stopping to be defined"
assert (
args.evaluation_strategy != IntervalStrategy.NO
), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"
def on_evaluate(self, args, state, control, metrics, **kwargs):
metric_to_check = self.metric_for_early_stopping
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics.get(metric_to_check)
if metric_value is None:
logger.warning(
f"early stopping required metric_for_early_stopping, but did not find {metric_to_check} so early stopping is disabled"
)
return
self.check_metric_value(args, state, control, metric_value)
if self.early_stopping_patience_counter >= self.early_stopping_patience:
control.should_training_stop = True
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
# model initialization seed maybe different from training seed -- to get a stable averaged result.
set_seed(model_args.init_seed)
# Labels
# It can be a classification or a regression task
num_labels = data_args.num_labels
if num_labels > 1:
is_regression = False # classification task
else:
is_regression = True # regression task
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.load_non_PT_hulm_model:
config = HaRTConfig(
num_labels=num_labels,
finetuning_task=data_args.task_name if data_args.task_name is not None else data_args.task_type,
use_history_output=data_args.use_history_output
)
if model_args.add_history:
config.add_history = True
if model_args.use_qh05_wts:
config.use_qh05_wts = True
else:
config.use_qh05_wts = False
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
def add_insep_token(tokenizer):
special_tokens_dict = {'sep_token': str('<|insep|>')}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
assert num_added_toks == 1
assert tokenizer.sep_token == '<|insep|>'
add_insep_token(tokenizer)
else:
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name if data_args.task_name is not None else data_args.task_type,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.use_history_output=data_args.use_history_output
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
config.pad_token_id = tokenizer.eos_token_id
config.freeze_model = model_args.freeze_model
config.use_hart_no_hist = model_args.use_hart_no_hist
if training_args.do_train and not model_args.load_non_PT_hulm_model:
hart = HaRTPreTrainedModel.from_pretrained(model_args.model_name_or_path)
elif training_args.do_train and model_args.load_non_PT_hulm_model:
hartbaseLMModel = HaRTBaseLMHeadModel.from_pretrained(model_args.model_name_or_path, config=config)
hartbaseLMModel.resize_token_embeddings(len(tokenizer))
hart = HaRTPreTrainedModel(config, hartbaseLMModel)
else:
raise ValueError("You're neither training nor evaluating. Can't pick a model because I don't know what do you want to do.")
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warn(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Preprocessed and padded datasets with labels
def load_dataset(args):
if data_args.task_type=='document':
if model_args.use_hart_no_hist:
return load_no_hulm_dataset(*args)
else:
return load_doc_dataset(*args)
elif data_args.task_type=='user':
return load_user_dataset(*args)
if data_args.train_table is not None or data_args.dev_table is not None or data_args.test_table is not None:
if data_args.train_table is not None:
args = [logger, tokenizer, data_args.train_table, block_size, data_args.max_train_blocks, data_args, 'train', data_args.disable_hulm_batching]
train_dataset, train_uncut_blocks = load_dataset(args)
if data_args.dev_table is not None:
args = [logger, tokenizer, data_args.dev_table, block_size, data_args.max_val_blocks, data_args, 'dev', data_args.disable_hulm_batching]
eval_dataset, eval_uncut_blocks = load_dataset(args)
elif data_args.test_table is not None:
args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', data_args.disable_hulm_batching]
eval_dataset, eval_uncut_blocks = load_dataset(args)
else:
raise ValueError("This FT runner requires train/dev/test data source paths currently!")
def compute_metrics(p: EvalPrediction):
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import scipy
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=-1)
if is_regression:
mse = ((preds - p.label_ids) ** 2).mean().item()
r_pear, p_value = scipy.stats.pearsonr(preds, p.label_ids)
# from https://www.aclweb.org/anthology/W18-0604.pdf
r_meas1 = 0.77
r_meas2 = 0.70
r_dis = r_pear/((r_meas1*r_meas2)**0.5)
return {
'mse': mse,
'r_dis': r_dis,
'r_pear': r_pear,
'p_value': p_value
}
else:
indices = p.label_ids!=-100 # make sure to ignore the labels marked as -100
labels = p.label_ids[indices]
if not model_args.use_hart_no_hist:
preds = preds[indices]
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall
}
# Data collator
if not data_args.disable_hulm_batching:
# This one will take care of collating batches of type [users, blocks, block_size]
data_collator = DataCollatorWithPaddingForHaRT(model_args, config, tokenizer, is_ft=True, is_user_level_ft=data_args.task_type=='user')
else:
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator = default_data_collator
if model_args.search_params:
def model_init():
set_seed(model_args.init_seed)
if training_args.do_train and not model_args.load_non_PT_hulm_model:
model = HaRTForSequenceClassification(config, model_args.model_name_or_path)
else:
model = HaRTForSequenceClassification(config, pt_model=hart)
set_seed(training_args.seed)
def freeze_params(model: nn.Module):
for par in model.parameters():
par.requires_grad = False
if model_args.freeze_model:
freeze_params(model.transformer)
if data_args.task_type=='user':
freeze_params(model.transformer.transformer)
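                # Added note: for user-level tasks the wrapped language-model stack
                # (model.transformer.transformer) is frozen here as well, so the
                # remaining HaRT components (e.g. the user-state recurrence) and the
                # classification head are what get updated during these trials.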
return model
####### NOTE: Ray Hyperparameter search is not extensively tested in this project!! #########
def ray_hp_space(trial):
from ray import tune
return {
"learning_rate": tune.loguniform(5e-6, 5e-4),
}
def stance_optuna_hp_space(trial):
return {
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 5e-4, log=True),
# "weight_decay": trial.suggest_float("weight_decay", 0.0, 1.0, log=False),
}
def sent_optuna_hp_space(trial):
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-7, 1e-5, log=True),
# "weight_decay": trial.suggest_float("weight_decay", 0.0, 1.0, log=False),
}
def doc_optuna_hp_space(trial):
return {
"learning_rate": trial.suggest_float("learning_rate", 5e-6, 5e-4, log=True),
# "weight_decay": trial.suggest_float("weight_decay", 0.0, 1.0, log=False),
}
def compute_objective(metrics: Dict[str, float]) -> float:
"""
The objective to minimize/maximize.
Args:
metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
:obj:`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
f1 = metrics.pop(training_args.metric_for_best_model, None)
return f1
def run_hp_search(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
import optuna
def _objective(trial, checkpoint_dir=None):
checkpoint = None
if checkpoint_dir:
for subdir in os.listdir(checkpoint_dir):
if subdir.startswith(PREFIX_CHECKPOINT_DIR):
checkpoint = os.path.join(checkpoint_dir, subdir)
trainer.objective = None
trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
# If there hasn't been any evaluation during the training loop.
if getattr(trainer, "objective", None) is None:
metrics = trainer.evaluate()
trainer.objective = trainer.compute_objective(metrics)
output_dir = trainer.state.best_model_checkpoint.split('/checkpoint')[0]
# TODO: see if can get the best model from best model checkpoint instead of saving
# if yes, use HF trainer instead of CustomTrainer and remove run_hp_search code.
trainer.save_model(output_dir=output_dir)
return trainer.objective
timeout = kwargs.pop("timeout", None)
n_jobs = kwargs.pop("n_jobs", 1)
study = optuna.create_study(direction=direction, **kwargs)
study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs)
best_trial = study.best_trial
return BestRun(str(best_trial.number), best_trial.value, best_trial.params)
# Initialize our Trainer
trainer = CustomTrainer(
model_init=model_init,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
callbacks=[
EvalLogsCallback,
EarlyStoppingCallback(
model_args.metric_for_early_stopping,
model_args.early_stopping_patience,
model_args.early_stopping_threshold
)
]
)
backend = 'ray' if model_args.use_ray else 'optuna' if model_args.use_optuna else None
optuna_hp_space = stance_optuna_hp_space if data_args.task_name=='stance' else sent_optuna_hp_space if data_args.task_name=='sentiment' else doc_optuna_hp_space
hp_space = ray_hp_space if model_args.use_ray else optuna_hp_space if model_args.use_optuna else None
best_trial = trainer.hyperparameter_search(
backend=backend,
hp_space=hp_space,
run_hp_search=run_hp_search,
n_trials=model_args.num_trials,
compute_objective=compute_objective,
direction='maximize')
else:
raise ValueError("This runner is only for hyperparams search trials!")
def log_and_save_metrics():
metrics = {}
metrics['pretrained_model_loc'] = model_args.model_name_or_path
metrics["best_trial_details"] = json.dumps(best_trial) # run_id, f1, hyperparams
metrics["best_trial_f1"] = best_trial.objective
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
metrics["train_blocks_per_sample"] = train_uncut_blocks if data_args.max_train_blocks is None else min(data_args.max_train_blocks, train_uncut_blocks)
metrics["block_size"] = block_size
metrics["gpus"] = training_args.n_gpu
metrics["total_epochs"] = training_args.num_train_epochs
metrics["per_device_train_batch_size"] = training_args.per_device_train_batch_size
metrics["train_table"] = data_args.train_table
max_val_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
metrics["eval_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_eval_batch_size"] = training_args.per_device_eval_batch_size
metrics["is_dev"] = True if data_args.dev_table else False
metrics["eval_table"] = data_args.dev_table if data_args.dev_table else data_args.test_table
if config.add_history:
metrics["history"] = model_args.add_history
metrics["extract_layer"] = config.extract_layer
metrics["layer_ins"] = config.layer_ins
if model_args.add_history:
metrics["0s_initial_history"] = False if model_args.initial_history else True
trainer.log_metrics("trial", metrics)
trainer.save_metrics("trial", metrics, combined=False)
log_and_save_metrics()
# Evaluation
if training_args.do_predict:
trainer.model = trainer.model.from_pretrained(training_args.output_dir + '/run-' + best_trial.run_id)
trainer.model = trainer.model.to(training_args.device)
trainer.save_model()
args = [logger, tokenizer, data_args.test_table, block_size, data_args.max_val_blocks, data_args, 'test', data_args.disable_hulm_batching]
eval_dataset, eval_uncut_blocks = load_dataset(args)
logger.info("*** Evaluate all test set ***")
eval_test(best_trial, 'test', data_args, training_args, eval_dataset, eval_uncut_blocks, trainer)
def eval_test(best_trial, test_type, data_args, training_args, eval_dataset, eval_uncut_blocks, trainer):
metrics = trainer.evaluate(eval_dataset=eval_dataset)
metrics['best_trial_objective'] = best_trial.objective
metrics['best_trial_run_id'] = best_trial.run_id
metrics['best_trial_hyperparams'] = json.dumps(best_trial.hyperparameters)
max_eval_samples = (
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
)
metrics["test_samples"] = min(max_eval_samples, len(eval_dataset))
metrics["test_blocks_per_sample"] = eval_uncut_blocks if data_args.max_val_blocks is None else min(data_args.max_val_blocks, eval_uncut_blocks)
metrics["per_device_test_batch_size"] = training_args.per_device_eval_batch_size
metrics["test_table"] = data_args.test_table
trainer.log_metrics("eval_{}".format(test_type), metrics)
trainer.save_metrics("eval_{}".format(test_type), metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 31,029 | 47.408736 | 168 | py |
HaRT | HaRT-main/HFtrainer_v451_for_reference/trainer.py | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import collections
import gc
import inspect
import math
import os
import re
import shutil
import sys
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
import fairscale
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if version.parse(fairscale.__version__) >= version.parse("0.3"):
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap import auto_wrap
else:
FullyShardedDDP = None
if is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_training_run_on_sagemaker():
logging.add_handler(StreamHandler(sys.stdout))
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the
maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
            The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or (args.deepspeed and args.do_train)
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# postpone switching model to cuda when:
        # 1. MP - since we are trying to fit a model that is much bigger than a single GPU
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
self._signature_columns = None
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
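    # Illustrative usage (not part of the class): callbacks can be passed either as a
    # class or as an instance, e.g.
    #
    #     trainer.add_callback(PrinterCallback)     # instantiated internally
    #     trainer.add_callback(PrinterCallback())   # or an existing instance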
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"])
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Build the sampler.
if self.args.group_by_length:
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
model_input_name=model_input_name,
)
else:
if self.args.world_size <= 1:
return RandomSampler(self.train_dataset)
elif self.args.parallel_mode == ParallelMode.TPU and not self.args.dataloader_drop_last:
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
)
else:
return DistributedSampler(
self.train_dataset, num_replicas=self.args.world_size, rank=self.args.process_index
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
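    # Illustrative sketch (hypothetical values): instead of relying on this method, a
    # caller can hand the Trainer its own optimizer/scheduler pair at init time:
    #
    #     optimizer = AdamW(model.parameters(), lr=3e-5)
    #     scheduler = get_scheduler("linear", optimizer, num_warmup_steps=0, num_training_steps=1000)
    #     trainer = Trainer(model=model, args=args, optimizers=(optimizer, scheduler))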
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def _wrap_model(self, model, training=True):
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
elif is_sagemaker_distributed_available():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
self.is_in_train = True
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({self.args.output_dir})")
if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
        # Keep track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if train_dataset_is_sized
else self.args.max_steps * self.args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if (
((step + 1) % self.args.gradient_accumulation_steps != 0)
and self.args.local_rank != -1
and self.args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            # Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif self.args.local_rank != -1:
dist.barrier()
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
if self.deepspeed:
# free up any memory that might be useful for eval
self.deepspeed = None
self.optimizer = None
self.lr_scheduler = None
self.model_wrapped = self.model
gc.collect() # force memory release
# to restore normal behavior outside of train replay the place_model_on_device logic w/o deepspeed
self.place_model_on_device = self.args.place_model_on_device
if self.is_model_parallel:
self.place_model_on_device = False
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
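    # Illustrative usage (hypothetical paths): `resume_from_checkpoint` accepts either a
    # bool or an explicit checkpoint directory, e.g.
    #
    #     trainer.train(resume_from_checkpoint=True)                     # latest checkpoint in args.output_dir
    #     trainer.train(resume_from_checkpoint="output/checkpoint-500")  # a specific checkpoint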
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
self.store_flos()
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
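    # Illustrative note (hypothetical values): with args.metric_for_best_model="f1" and
    # args.greater_is_better=True, the key checked above is "eval_f1" and
    # np.greater(metric_value, self.state.best_metric) decides whether this checkpoint
    # becomes the new best_model_checkpoint, e.g. np.greater(0.85, 0.83) -> True.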
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
            # Not sure how to check if there is a saved deepspeed checkpoint, but since it just returns None if it fails to find one, this works as a check-and-load function
self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize for a greater or lower objective. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
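    # Illustrative sketch (hypothetical search space): a minimal optuna-backed call,
    # assuming the Trainer was built with a `model_init` function:
    #
    #     def my_hp_space(trial):
    #         return {"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True)}
    #
    #     best_run = trainer.hyperparameter_search(
    #         hp_space=my_hp_space, n_trials=10, direction="minimize", backend="optuna"
    #     )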
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
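    # A hedged sketch of the "subclass and override" pattern mentioned in the
    # docstring above, e.g. for a class-weighted loss. `WeightedLossTrainer`
    # and `class_weights` are illustrative names, not part of this codebase.
    #
    #   class WeightedLossTrainer(Trainer):
    #       def compute_loss(self, model, inputs, return_outputs=False):
    #           labels = inputs.pop("labels")
    #           outputs = model(**inputs)
    #           loss_fct = nn.CrossEntropyLoss(weight=class_weights)
    #           loss = loss_fct(outputs.logits.view(-1, model.config.num_labels), labels.view(-1))
    #           return (loss, outputs) if return_outputs else loss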
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
):
state_dict = self.model.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif self.is_world_process_zero():
self._save(output_dir)
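    # Usage sketch (illustrative path, not from this file):
    #   trainer.save_model("path/to/checkpoint")
    # and later, with any matching `from_pretrained` class,
    #   model = AutoModelForSequenceClassification.from_pretrained("path/to/checkpoint")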
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.is_world_process_zero(),
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
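    # Worked example (illustrative): with `save_total_limit=2` and
    # checkpoint-500, checkpoint-1000, checkpoint-1500 on disk, one checkpoint
    # is deleted -- normally the oldest (checkpoint-500), unless it is
    # `best_model_checkpoint`, in which case `_sorted_checkpoints` has already
    # swapped it to the end of the list so it survives the rotation.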
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
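    # Usage sketch (illustrative): metrics come back as a flat dict keyed with
    # `metric_key_prefix`, e.g.
    #   metrics = trainer.evaluate(eval_dataset=my_eval_dataset)
    #   print(metrics["eval_loss"], metrics.get("eval_accuracy"))
    # `my_eval_dataset` is an assumption, and "eval_accuracy" only exists if
    # `compute_metrics` returns an "accuracy" entry.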
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
                ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default).
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output
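    # Usage sketch (illustrative): for a classification head the predictions
    # are logits, so a typical post-processing step is an argmax over classes.
    #   output = trainer.predict(test_dataset)
    #   preds = np.argmax(output.predictions, axis=-1)
    #   print(output.metrics)
    # `np` (numpy) and `test_dataset` are assumed to be provided by the caller.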
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
if self.args.deepspeed and not self.args.do_train:
# no harm, but flagging to the user that deepspeed config is ignored for eval
# flagging only for when --do_train wasn't passed as only then it's redundant
logger.info("Detected the deepspeed argument but it will not be used for evaluation")
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, half it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
# The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
# a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
        Gather the value of `tensors` (a tensor or a list/tuple of nested tensors) across processes and convert the
        result to numpy arrays.
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
        Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
| 93,217 | 46.415056 | 190 | py |
HaRT | HaRT-main/data/data_collator.py | import torch
from typing import Dict, List
from dataclasses import dataclass
from transformers import BatchEncoding
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
@dataclass
class DataCollatorWithPaddingForHaRT:
"""
Data collator that simply collates batches of lists of dict-like objects
    and pads missing blocks with the appropriate pad value.
    It also sets additional model inputs (e.g., history, layer_ins, extract_layer) when they are configured in the args.
"""
def __init__(self, model_args, config, tokenizer: PreTrainedTokenizerBase, deepspeed=False, is_ft=False, is_user_level_ft=False):
self.is_ft = is_ft
self.is_user_level_ft = is_user_level_ft
self.tokenizer = tokenizer
self.output_block_last_hidden_states = None if is_ft else model_args.output_block_last_hidden_states
if model_args.add_history or config.add_history:
self.history = torch.load(model_args.initial_history) if model_args.initial_history else (torch.zeros(config.n_embd))
self.history = self.history.to(torch.float16) if deepspeed else self.history.float()
if not is_ft:
self.layer_ins = model_args.layer_ins if model_args.layer_ins else config.layer_ins
self.extract_layer = model_args.extract_layer if model_args.extract_layer else config.extract_layer
else:
self.history = None
self.layer_ins = None
self.extract_layer = None
def __call__(self, examples: List[List[Dict[str, List]]]) -> List[Dict[str, torch.Tensor]]:
# In this function we'll make the assumption that all `examples` in the batch of lists
# have the same attributes.
# So we will look at the first element as a proxy for what attributes exist
# in the whole batch of lists
if not isinstance(examples[0], list) or \
(not self.is_user_level_ft and not isinstance(examples[0][0], (dict, BatchEncoding))) or \
(self.is_user_level_ft and not isinstance(examples[0][2], (dict, BatchEncoding))):
raise ValueError("You landed on an incorrect collator! This one's AR_HuLM specific.")
first = examples[0][2] if self.is_user_level_ft else examples[0][0]
batch = {}
if self.is_user_level_ft:
batch['user_ids'] = torch.tensor([
example[0]
for example in examples
])
batch['labels'] = torch.tensor([
example[1]
for example in examples
])
# we do this to map it to the examples format as received when not user_level_ft,
# in order to reuse the rest of the following code for data collation
blocks = [example[2:] for example in examples]
examples = blocks
# Handling all possible keys as figured from the first element
for k, v in first.items():
if k not in ("input_ids", "attention_mask", "labels"):
raise ValueError("You've landed at an incorrect collator! This one's specific to AR_HuLM.")
if v is not None and not isinstance(v, str):
pad = self.tokenizer.eos_token_id if k=='input_ids' else 0 if k=='attention_mask' else -100
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([
[block[k] if block is not None
else ([pad]*len(v))
for block in example]
for example in examples])
else:
# Running through each example (i.e., each user of the batch, each user will have multiple blocks of words)
batch[k] = torch.tensor([
[block[k] if block is not None
else ([pad]*len(v))
for block in example]
for example in examples
])
block_size = len(first['input_ids'])
batch['history'] = None if self.history is None else self.history.repeat(len(examples), block_size, 1)
if not self.is_ft:
batch['layer_ins'] = self.layer_ins
batch['extract_layer'] = self.extract_layer
batch['output_block_last_hidden_states'] = self.output_block_last_hidden_states
return batch
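# A hedged usage sketch: this collator is meant to be passed to a Trainer or
# DataLoader together with HaRT-style examples (lists of per-user blocks).
# `model_args`, `config`, `tokenizer`, and `train_instances` below are
# illustrative names supplied by the surrounding training scripts.
#
#   collator = DataCollatorWithPaddingForHaRT(model_args, config, tokenizer, is_ft=False)
#   loader = torch.utils.data.DataLoader(train_instances, batch_size=2, collate_fn=collator)
#   batch = next(iter(loader))  # dict with input_ids, attention_mask, labels, history, ...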
def user_default_data_collator(examples: List[List[Dict[str, List]]]) -> List[Dict[str, torch.Tensor]]:
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- ``labels``: handles a single value (int or float) per object
- ``user_id``: handles a single value per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
to the model.
"""
# In this function we'll make the assumption that all `examples` in the batch of lists
# have the same attributes.
# So we will look at the first element as a proxy for what attributes exist
# in the whole batch of lists
if not isinstance(examples[0], list) or \
not isinstance(examples[0][2], (dict, BatchEncoding)):
raise ValueError("You landed on an incorrect collator! This one's AR_HuLM specific.")
return
first = examples[0][2]
batch = {}
batch['user_ids'] = torch.tensor([
example[0]
for example in examples
])
batch['labels'] = torch.tensor([
example[1]
for example in examples
])
# we do this to map it to the examples format as received when not user_level_ft,
# in order to reuse the rest of the following code for data collation
blocks = [example[2:] for example in examples]
examples = blocks
# Handling all possible keys as figured from the first element
for k, v in first.items():
if k not in ("input_ids", "attention_mask", "labels"):
raise ValueError("You've landed at an incorrect collator! This one's specific to AR_HuLM.")
return
if v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([be[k] for example in examples for be in example])
else:
# Running through each example (i.e., each user of the batch, each user will have multiple blocks of words)
batch[k] = torch.tensor([be[k] for example in examples for be in example])
return batch | 7,177 | 48.847222 | 133 | py |
HaRT | HaRT-main/data/utils_gpt2hlc/clm_data_utils.py | import time
import copy
import pandas as pd
from more_itertools import split_at
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from transformers import BatchEncoding
def add_insep_token(tokenizer):
special_tokens_dict = {'sep_token': str('<|insep|>')}
tokenizer.add_special_tokens(special_tokens_dict)
def get_conn(data_args):
myDB = URL(drivername='mysql', host=data_args.hostname,
database=data_args.db, query={'read_default_file': '~/.my.cnf', 'charset': 'utf8mb4'})
engine = create_engine(myDB, encoding='latin1')
conn = engine.connect()
return conn
def get_data(logger, table, data_args, data_type):
logger.info("Getting data from table:{} in {} database".format(table, data_args.db))
conn = get_conn(data_args)
select_clause = 'select user_dataset_id, message_id, message, updated_time from ' + table
order_clause = ' order by user_dataset_id, updated_time'
limit_clause = '' #if not __debug__ else ' limit 10'
if data_type=='train':
if "en_non_oosmsgs" in table:
dev_filter_column = 'is_oosusr_dev'
test_filter_column = 'is_oosusr_test'
where_clause = ' where ' + dev_filter_column + '=0' + ' and ' + test_filter_column + '=0'
stmt = select_clause + where_clause + order_clause + limit_clause
else:
stmt = select_clause + order_clause + limit_clause
results = conn.execute(stmt)
elif data_type=='dev':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosusr_dev'
where_clause = ' where ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_dev'
where_clause = ' where ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
else:
stmt = select_clause + order_clause + limit_clause
results = conn.execute(stmt)
elif data_type=='test':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosusr_test'
where_clause = ' where ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_test'
where_clause = ' where ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
data = pd.DataFrame(results.fetchall())
data.columns = results.keys()
data = data[data.message.notnull()]
conn.close()
return data
def append_insep(data, tokenizer):
data['message'] = data['message'] + tokenizer.sep_token
def concat(data):
return data.groupby('user_dataset_id')['message'].apply(''.join).reset_index()
def process_data(data, tokenizer, block_size, max_blocks):
def tokenize(data):
return tokenizer(data)
def pad(data, pad_value):
multiplier = (block_size - len(data))%block_size
data.extend([pad_value]*multiplier)
return data
def pad_message(data, pad_value, counter=0):
for i,x in enumerate(data):
if len(x) > block_size:
x = x[0:block_size]
data[i] = x
counter+=1
else:
x.extend([pad_value]*(block_size-len(x)))
return data, counter
def limit_and_split_messages(data):
i_values = data['input_ids'][0:1024*max_blocks] if max_blocks is not None else data['input_ids']
i_values = list(split_at(i_values, lambda x:x==tokenizer.eos_token_id))[0]
i_values = i_values[:-1] if i_values[-1]==tokenizer.sep_token_id else i_values
i_values = list(split_at(i_values, lambda x:x==tokenizer.sep_token_id))
return i_values
def pad_and_collate_data(data, counter):
i_values = data
a_values = [[1]*len(x) for x in i_values]
l_values = copy.deepcopy(i_values)
i_values, counter = pad_message(i_values, tokenizer.eos_token_id, counter)
a_values, _ = pad_message(a_values, 0)
l_values, _ = pad_message(l_values, -100)
return [BatchEncoding(dict(input_ids = i_values[x],
attention_mask=a_values[x], labels = l_values[x]))
for x in range(len(i_values))], counter
def process(data):
counter = 0
tokenized = tokenize(data)
tokenized['input_ids'] = pad(tokenized['input_ids'], tokenizer.eos_token_id)
input_ids = limit_and_split_messages(tokenized)
ret_data, counter = pad_and_collate_data(input_ids, counter)
return ret_data
data['batch_encodings'] = data['message'].apply(process)
def transform_data(logger, tokenizer, data, block_size, max_blocks):
start_time = time.time()
data_new = data[['user_dataset_id', 'message']].copy()
append_insep(data_new, tokenizer)
data_new = concat(data_new)
process_data(data_new, tokenizer, block_size, max_blocks)
logger.info("--- %s seconds ---" % (time.time() - start_time))
return data_new
def group_data(data, logger):
batch = pd.DataFrame(data.batch_encodings.tolist())
batch = batch.stack()
logger.info('************** Total Number of instances = {} *************'.format(len(batch)))
return batch.to_numpy().tolist()
def load_dataset(logger, tokenizer, table, block_size, max_blocks, data_args, data_type):
add_insep_token(tokenizer)
data = get_data(logger, table, data_args, data_type)
data = transform_data(logger, tokenizer, data, block_size, max_blocks)
logger.info('************** Block size = {} *************'.format(block_size))
instances = group_data(data, logger)
return instances, len(instances)
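# Usage sketch (assumptions: `logger`, a GPT-2 style `tokenizer`, and `data_args`
# pointing at a readable table are prepared by the caller; "myTable" is illustrative):
#
#   instances, num_instances = load_dataset(
#       logger, tokenizer, "myTable", block_size=1024, max_blocks=8,
#       data_args=data_args, data_type="train"
#   )
# Each element of `instances` is a BatchEncoding holding input_ids,
# attention_mask and labels for one padded message block.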
| 5,982 | 39.154362 | 104 | py |
HaRT | HaRT-main/data/utils_gpt2hlc/ft_user_data_utils_gpt2hlc.py | import time
import copy
import pandas as pd
from more_itertools import split_at
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from transformers import BatchEncoding
def add_insep_token(tokenizer):
special_tokens_dict = {'sep_token': str('<|insep|>')}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
def get_conn(data_args):
myDB = URL(drivername='mysql', host=data_args.hostname,
database=data_args.db, query={'read_default_file': '~/.my.cnf', 'charset': 'utf8mb4'})
engine = create_engine(myDB, encoding='latin1')
conn = engine.connect()
return conn
def get_data(logger, table, label_field, data_args, data_type):
logger.info("Getting data from table:{} in {} database".format(table, data_args.db))
conn = get_conn(data_args)
select_clause = 'select user_id, message_id, message, updated_time from ' + table
where_clause = ''
order_clause = ' order by user_id, updated_time'
limit_clause = '' #if not __debug__ else ' limit 10'
if data_type=='train':
if "en_non_oosmsgs" in table:
filter_table = 'masterstats_lbp_upt50_en_train'
where_clause = ' where user_id in (select user_id from ' + filter_table + ')'
stmt = select_clause + where_clause + order_clause + limit_clause
else:
filter_table = '20_outcomes_all'
filter_column = 'r10pct_test_fold'
where_clause = ' where user_id in (select user_id from ' + filter_table + ' where '+ filter_column +'=0)'
stmt = select_clause + where_clause + order_clause + limit_clause
elif data_type=='dev':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosuser_dev'
where_clause = ' where ' + filter_column + '=1'
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_dev'
where_clause = ' where ' + filter_column + '=1'
else:
filter_table = '20_outcomes_all'
filter_column = 'r10pct_test_fold'
where_clause = ' where user_id in (select user_id from ' + filter_table + ' where '+ filter_column +'=1)'
elif data_type=='test':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosuser_test'
where_clause = ' where ' + filter_column + '=1'
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_test'
where_clause = ' where ' + filter_column + '=1'
elif data_type=='test_qlength':
if 'en_non_oosmsgs' in table:
filter_table = 'masterstats_lbp_upt50_en_test'
where_clause = ' where user_id in (select user_id from ' + filter_table + ' where qlength >= 100)'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
data = pd.DataFrame(results.fetchall())
data.columns = results.keys()
data = data[data.message.notnull()]
logger.info("Getting labels for table:{} in {} database".format(table, data_args.db))
labels = get_labels(conn, data_type, table, label_field)
conn.close()
return data, labels
def get_labels(conn, data_type, data_table, label_field):
select_clause = 'select user_id, ' + label_field + ' from '
where_clause = ''
order_clause = ' order by user_id'
limit_clause = '' #if not __debug__ else ' limit 10'
if data_type=='train':
if 'en_non_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_train'
where_clause = ' where user_id in (select distinct user_id from ' + data_table + ')'
else:
table = 'masterstats_lbp_trainingset'
filter_table_where = '20_outcomes_all where r10pct_test_fold=0'
where_clause = ' where user_id in (select distinct user_id from ' + filter_table_where + ')'
elif data_type=='dev':
if 'en_non_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_dev'
elif 'en_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_dev_seen'
else:
table = 'masterstats_lbp_trainingset'
filter_table_where = '20_outcomes_all where r10pct_test_fold=1'
where_clause = ' where user_id in (select distinct user_id from ' + filter_table_where + ')'
elif data_type=='test':
if 'en_non_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_test'
elif 'en_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_test_seen'
else:
table = 'masterstats_lbp_testset'
elif data_type=='test_qlength':
table = 'masterstats_lbp_testset_qlen100'
stmt = select_clause + table + where_clause +order_clause + limit_clause
results = conn.execute(stmt)
labels = pd.DataFrame(results.fetchall())
labels.columns = ['user_id', 'label']
return labels
def append_insep(data, tokenizer):
data['message'] = data['message'] + tokenizer.sep_token
def concat(data):
return data.groupby('user_id')['message'].apply(''.join).reset_index()
def process_data(data, tokenizer, block_size, max_blocks):
def tokenize(data):
return tokenizer(data)
def pad(data, pad_value):
multiplier = (block_size - len(data))%block_size
data.extend([pad_value]*multiplier)
return data
def pad_message(data, pad_value, counter=0):
for i,x in enumerate(data):
if len(x) > block_size:
x = x[0:block_size]
data[i] = x
counter+=1
else:
x.extend([pad_value]*(block_size-len(x)))
return data, counter
def limit_and_split_messages(data):
i_values = data['input_ids'][0:1024*max_blocks] if max_blocks is not None else data['input_ids']
i_values = list(split_at(i_values, lambda x:x==tokenizer.eos_token_id))[0]
i_values = i_values[:-1] if i_values[-1]==tokenizer.sep_token_id else i_values
i_values = list(split_at(i_values, lambda x:x==tokenizer.sep_token_id))
return i_values
def pad_and_collate_data(data, counter):
i_values = data
a_values = [[1]*len(x) for x in i_values]
i_values, counter = pad_message(i_values, tokenizer.eos_token_id, counter)
a_values, _ = pad_message(a_values, 0)
return [BatchEncoding(dict(input_ids = i_values[x],
attention_mask=a_values[x]))
for x in range(len(i_values))], counter
def process(data):
counter = 0
tokenized = tokenize(data)
tokenized['input_ids'] = pad(tokenized['input_ids'], tokenizer.eos_token_id)
input_ids = limit_and_split_messages(tokenized)
ret_data, counter = pad_and_collate_data(input_ids, counter)
return ret_data
data['batch_encodings'] = data['message'].apply(process)
def transform_data(logger, tokenizer, data, block_size, max_blocks):
start_time = time.time()
data_new = data[['user_id', 'message']].copy()
append_insep(data_new, tokenizer)
data_new = concat(data_new)
process_data(data_new, tokenizer, block_size, max_blocks)
logger.info("--- %s seconds ---" % (time.time() - start_time))
return data_new
def join_data_and_labels(data, labels):
assert len(data)==len(labels)
merged_data = pd.merge(data, labels, on='user_id')
assert len(merged_data)==len(data)
assert merged_data.shape[-1]==4
return merged_data
def group_data(data, max_blocks, logger):
batch = data.explode('batch_encodings').reset_index(drop=True)
batch = batch[['user_id', 'label', 'batch_encodings']]
logger.info('************** Total Number of instances = {} *************'.format(len(batch)))
return batch.to_numpy().tolist()
def load_dataset(logger, tokenizer, table, block_size, max_blocks, data_args, data_type):
add_insep_token(tokenizer)
label_field = data_args.task_name
data_type = 'test_qlength' if data_args.task_name == 'ope' else data_type
data, labels = get_data(logger, table, label_field, data_args, data_type)
data = transform_data(logger, tokenizer, data, block_size, max_blocks)
data = join_data_and_labels(data, labels)
logger.info('************** Block size = {} *************'.format(block_size))
instances = group_data(data, max_blocks, logger)
return instances, len(instances)
| 8,585 | 40.278846 | 117 | py |
HaRT | HaRT-main/data/utils_optuna_trials/hulm_sample_data_utils.py | import os
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from data.utils_hart.hulm_data_utils import transform_data, group_data
def get_conn(data_args):
myDB = URL(drivername='mysql', host=data_args.hostname,
database=data_args.db, query={'read_default_file': '~/.my.cnf', 'charset': 'utf8mb4'})
engine = create_engine(myDB, encoding='latin1')
conn = engine.connect()
return conn
'''
To be run only once! This will save the sampled users' IDs in a csv that will be used for all following optuna trials.
'''
def sample_train_users(logger, table, data_args, filename):
logger.info("Getting {} sampled train users from table:{} in {} database, to run optuna trials.".format(str(data_args.num_users_for_optuna), table, data_args.db))
conn = get_conn(data_args)
select_clause = 'select distinct user_dataset_id from ' + table
order_clause = ' order by rand() limit ' + str(data_args.num_users_for_optuna)
dev_filter_column = 'is_oosusr_dev'
test_filter_column = 'is_oosusr_test'
source_filter_column = 'dataset '
source_not_included = "'fb'"
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + dev_filter_column + '=0' + ' and ' + test_filter_column + '=0'
stmt = select_clause + where_clause + order_clause
results = conn.execute(stmt)
data = pd.DataFrame(results.fetchall())
data.columns = results.keys()
data.to_csv(filename, index=False)
conn.close()
return data
def get_data(logger, table, data_args, data_type, sampled_users):
logger.info("Getting data from table:{} in {} database".format(table, data_args.db))
conn = get_conn(data_args)
select_clause = 'select user_dataset_id, message_id, message, updated_time from ' + table
order_clause = ' order by user_dataset_id, updated_time'
limit_clause = '' if not __debug__ else ' limit 100'
source_filter_column = 'dataset '
source_not_included = "'fb'"
if data_type=='train':
if "en_non_oosmsgs" in table:
dev_filter_column = 'is_oosusr_dev'
test_filter_column = 'is_oosusr_test'
users_id_column = 'user_dataset_id'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + dev_filter_column + '=0' + ' and ' + test_filter_column + '=0' + ' and ' + users_id_column + ' in (' + sampled_users + ')'
stmt = select_clause + where_clause + order_clause + limit_clause
else:
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ')'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
elif data_type=='dev':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosusr_dev'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_dev'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
else:
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ')'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
elif data_type=='test':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosusr_test'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_test'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
data = pd.DataFrame(results.fetchall())
data.columns = results.keys()
data = data[data.message.notnull()]
conn.close()
return data
def sample_users_if_train(logger, table, data_args):
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '../datasets/pt_sampled_users.csv')
try:
sampled_users = pd.read_csv(filename)
if sampled_users.size != data_args.num_users_for_optuna:
sampled_users = sample_train_users(logger, table, data_args, filename)
except FileNotFoundError:
sampled_users = sample_train_users(logger, table, data_args, filename)
sampled_users_string = ', '.join(["'{}'".format(e) for e in sampled_users['user_dataset_id'].to_list()])
return sampled_users_string
def load_dataset(logger, tokenizer, table, block_size, max_blocks, data_args, data_type, disable_hulm_batching):
sampled_users_string = sample_users_if_train(logger, table, data_args) if data_type=='train' else ''
data = get_data(logger, table, data_args, data_type, sampled_users_string)
data = transform_data(logger, tokenizer, data, block_size)
logger.info('************** Block size = {} *************'.format(block_size))
if not disable_hulm_batching:
return group_data(data, max_blocks, logger)
else:
instances, uncut_num_blocks = group_data(data, max_blocks, logger)
flat_list = [item for sublist in instances for item in sublist if item is not None]
return flat_list, uncut_num_blocks
| 5,851 | 49.017094 | 230 | py |
HaRT | HaRT-main/data/utils_hart/ft_user_data_utils.py | import time
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from transformers import BatchEncoding
user_id_column = 'user_id'
message_column = 'message'
order_by_fields = [user_id_column, 'updated_time']
label_column = 'label'
def get_fields(data_args):
if data_args.task_name is not None:
return {
'order_by_fields': [user_id_column, 'message_id'],
'label_field': data_args.task_name
}
else:
return {
'order_by_fields': order_by_fields,
'label_field': label_column
}
def get_conn(data_args):
myDB = URL(drivername='mysql', host=data_args.hostname,
database=data_args.db, query={'read_default_file': '~/.my.cnf', 'charset': 'utf8mb4'})
engine = create_engine(myDB, encoding='latin1')
conn = engine.connect()
return conn
def get_data_from_db(logger, table, label_field, data_args, data_type):
logger.info("Getting data from table:{} in {} database".format(table, data_args.db))
conn = get_conn(data_args)
select_clause = 'select user_id, message_id, message, updated_time from ' + table
where_clause = ''
order_clause = ' order by user_id, updated_time'
limit_clause = '' #if not __debug__ else ' limit 10'
if data_type=='train':
if "en_non_oosmsgs" in table:
filter_table = 'masterstats_lbp_upt50_en_train'
where_clause = ' where user_id in (select user_id from ' + filter_table + ')'
stmt = select_clause + where_clause + order_clause + limit_clause
else:
filter_table = '20_outcomes_all'
filter_column = 'r10pct_test_fold'
where_clause = ' where user_id in (select user_id from ' + filter_table + ' where '+ filter_column +'=0)'
stmt = select_clause + where_clause + order_clause + limit_clause
elif data_type=='dev':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosuser_dev'
where_clause = ' where ' + filter_column + '=1'
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_dev'
where_clause = ' where ' + filter_column + '=1'
else:
filter_table = '20_outcomes_all'
filter_column = 'r10pct_test_fold'
where_clause = ' where user_id in (select user_id from ' + filter_table + ' where '+ filter_column +'=1)'
elif data_type=='test':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosuser_test'
where_clause = ' where ' + filter_column + '=1'
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_test'
where_clause = ' where ' + filter_column + '=1'
elif data_type=='test_qlength':
if 'en_non_oosmsgs' in table:
filter_table = 'masterstats_lbp_upt50_en_test'
where_clause = ' where user_id in (select user_id from ' + filter_table + ' where qlength >= 100)'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
data = pd.DataFrame(results.fetchall())
data.columns = results.keys()
data = data[data.message.notnull()]
logger.info("Getting labels for table:{} in {} database".format(table, data_args.db))
labels = get_labels(conn, data_type, table, label_field)
conn.close()
return data, labels
def get_labels(conn, data_type, data_table, label_field):
select_clause = 'select user_id, ' + label_field + ' from '
where_clause = ''
order_clause = ' order by user_id'
limit_clause = '' #if not __debug__ else ' limit 10'
if data_type=='train':
if 'en_non_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_train'
where_clause = ' where user_id in (select distinct user_id from ' + data_table + ')'
else:
table = 'masterstats_lbp_trainingset'
filter_table_where = '20_outcomes_all where r10pct_test_fold=0'
where_clause = ' where user_id in (select distinct user_id from ' + filter_table_where + ')'
elif data_type=='dev':
if 'en_non_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_dev'
elif 'en_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_dev_seen'
else:
table = 'masterstats_lbp_trainingset'
filter_table_where = '20_outcomes_all where r10pct_test_fold=1'
where_clause = ' where user_id in (select distinct user_id from ' + filter_table_where + ')'
elif data_type=='test':
if 'en_non_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_test'
elif 'en_oosmsgs' in data_table:
table = 'masterstats_lbp_upt50_en_test_seen'
else:
table = 'masterstats_lbp_testset'
elif data_type=='test_qlength':
table = 'masterstats_lbp_testset_qlen100'
stmt = select_clause + table + where_clause +order_clause + limit_clause
results = conn.execute(stmt)
labels = pd.DataFrame(results.fetchall())
labels.columns = [user_id_column, label_column]
return labels
def get_data_from_csv(logger, csv_file, fields, data_type):
logger.info("Getting data from {} data pickle file:{}".format(data_type, csv_file))
data = pd.read_csv(csv_file)
data.sort_values(by=fields['order_by_fields'], inplace=True)
data.reset_index(drop=True, inplace=True)
data_new = data[[user_id_column, message_column]].copy()
labels = data[[user_id_column, label_column]].copy()
labels.drop_duplicates(inplace=True)
return data_new, labels
def get_data_from_pkl(logger, pkl_file, fields, data_type):
logger.info("Getting data from {} data pickle file:{}".format(data_type, csv_file))
data = pd.read_pickle(pkl_file)
data.sort_values(by=fields['order_by_fields'], inplace=True)
data.reset_index(drop=True, inplace=True)
data_new = data[[user_id_column, message_column]].copy()
labels = data[[user_id_column, label_column]].copy()
labels.drop_duplicates(inplace=True)
return data_new, labels
def append_insep(data, tokenizer):
data[message_column] = data[message_column] + tokenizer.sep_token
def concat(data):
return data.groupby(user_id_column)[message_column].apply(''.join).reset_index()
def process_data(data, tokenizer, block_size):
def tokenize(data):
return tokenizer(data)
def pad(data, pad_value):
multiplier = (block_size - len(data))%block_size
data.extend([pad_value]*multiplier)
return data
#TODO: check if this is even required
def convert_to_int(data):
data['input_ids'] = list(map(int,data['input_ids']))
data['attention_mask'] = list(map(int,data['attention_mask']))
def chunks(data):
i_values = data['input_ids']
a_values = data['attention_mask']
return [BatchEncoding(dict(input_ids = i_values[x:x+block_size],
attention_mask=a_values[x:x+block_size]))
for x in range(0, len(i_values), block_size)]
def process(data):
tokenized = tokenize(data)
tokenized['input_ids'] = pad(tokenized['input_ids'], tokenizer.eos_token_id)
tokenized['attention_mask'] = pad(tokenized['attention_mask'], 0)
convert_to_int(tokenized)
return chunks(tokenized)
data['batch_encodings'] = data['message'].apply(process)
def transform_data(logger, tokenizer, data, block_size):
start_time = time.time()
data_new = data[[user_id_column, message_column]].copy()
append_insep(data_new, tokenizer)
data_new = concat(data_new)
process_data(data_new, tokenizer, block_size)
logger.info("--- %s seconds ---" % (time.time() - start_time))
return data_new
def join_data_and_labels(data, labels):
assert len(data)==len(labels)
merged_data = pd.merge(data, labels, on=user_id_column)
assert len(merged_data)==len(data)
assert merged_data.shape[-1]==4
return merged_data
def group_data(data, max_blocks, logger):
batch = pd.DataFrame(data.batch_encodings.tolist())
actual_blocks = len(batch.columns)
logger.info('************** Total Number of blocks = {} *************'.format(len(batch.columns)))
if max_blocks is not None and len(batch.columns) > max_blocks:
batch = batch[range(max_blocks)]
logger.info('************ Trimmed Number of blocks = {} *************'.format(len(batch.columns)))
assert len(data)==len(batch)
data = pd.concat((data[[user_id_column, label_column]], batch), axis=1)
assert data.shape[-1]==batch.shape[-1] + 2
return data.to_numpy().tolist(), actual_blocks
def load_dataset(logger, tokenizer, table, block_size, max_blocks, data_args, data_type, disable_hulm_batching):
label_field = data_args.task_name
data_type = 'test_qlength' if data_args.task_name == 'ope' else data_type
fields = get_fields(data_args)
if 'pkl' in table:
data, labels = get_data_from_pkl(logger, table, fields, data_type)
elif 'csv' in table:
data, labels = get_data_from_csv(logger, table, fields, data_type)
else:
data, labels = get_data_from_db(logger, table, label_field, data_args, data_type)
data = transform_data(logger, tokenizer, data, block_size)
data = join_data_and_labels(data, labels)
logger.info('************** Block size = {} *************'.format(block_size))
if not disable_hulm_batching:
return group_data(data, max_blocks, logger)
else:
instances, uncut_num_blocks = group_data(data, max_blocks, logger)
flat_list = [item for sublist in instances for item in sublist if item is not None]
return flat_list, uncut_num_blocks
| 9,929 | 41.076271 | 117 | py |
HaRT | HaRT-main/data/utils_hart/hulm_data_utils.py | import time
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from transformers import BatchEncoding
user_id_column = 'user_id'
message_column = 'message'
order_by_fields = [user_id_column, 'updated_time']
def get_fields():
return {
'order_by_fields': order_by_fields
}
def get_conn(data_args):
myDB = URL(drivername='mysql', host=data_args.hostname,
database=data_args.db, query={'read_default_file': '~/.my.cnf', 'charset': 'utf8mb4'})
engine = create_engine(myDB, encoding='latin1')
conn = engine.connect()
return conn
def get_data_from_db(logger, table, data_args, data_type):
logger.info("Getting data from table:{} in {} database".format(table, data_args.db))
conn = get_conn(data_args)
select_clause = 'select user_dataset_id' + ', ' + message_column + ', message_id, updated_time from ' + table
order_clause = ' order by ' + ', '.join(order_by_fields)
limit_clause = '' if not __debug__ else ' limit 10'
source_filter_column = 'dataset '
source_not_included = "'fb'"
if data_type=='train':
if "en_non_oosmsgs" in table:
dev_filter_column = 'is_oosusr_dev'
test_filter_column = 'is_oosusr_test'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + dev_filter_column + '=0' + ' and ' + test_filter_column + '=0'
stmt = select_clause + where_clause + order_clause + limit_clause
else:
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ')'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
elif data_type=='dev':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosusr_dev'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_dev'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
else:
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ')'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
elif data_type=='test':
if 'en_non_oosmsgs' in table:
filter_column = 'is_oosusr_test'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
elif 'en_oosmsgs' in table:
filter_column = 'is_oosmsg_test'
where_clause = ' where ' + source_filter_column + 'not in (' + source_not_included + ') and ' + filter_column + '=1'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
data = pd.DataFrame(results.fetchall())
data.columns = results.keys()
data[user_id_column] = data['user_dataset_id']
data = data[data.message.notnull()]
conn.close()
return data
def get_data_from_csv(logger, csv_file, fields, data_type):
logger.info("Getting data from {} data pickle file:{}".format(data_type, csv_file))
data = pd.read_csv(csv_file)
data.sort_values(by=fields['order_by_fields'], inplace=True)
data.reset_index(drop=True, inplace=True)
return data
def get_data_from_pkl(logger, pkl_file, fields, data_type):
logger.info("Getting data from {} data pickle file:{}".format(data_type, pkl_file))
data = pd.read_pickle(pkl_file)
data.sort_values(by=fields['order_by_fields'], inplace=True)
data.reset_index(drop=True, inplace=True)
return data
def append_insep(data, tokenizer):
data[message_column] = data[message_column] + tokenizer.sep_token
def concat(data):
return data.groupby(user_id_column)[message_column].apply(''.join).reset_index()
def process_data(data, tokenizer, block_size):
def tokenize(data):
return tokenizer(data)
def pad(data, pad_value):
multiplier = (block_size - len(data))%block_size
data.extend([pad_value]*multiplier)
return data
def convert_to_int(data):
data['input_ids'] = list(map(int,data['input_ids']))
data['attention_mask'] = list(map(int,data['attention_mask']))
data['labels'] = list(map(int,data['labels']))
def chunks(data):
i_values = data['input_ids']
a_values = data['attention_mask']
l_values = data['labels']
return [BatchEncoding(dict(input_ids = i_values[x:x+block_size],
attention_mask=a_values[x:x+block_size], labels = l_values[x:x+block_size]))
for x in range(0, len(i_values), block_size)]
def process(data):
tokenized = tokenize(data)
tokenized['labels'] = tokenized['input_ids'].copy()
tokenized['input_ids'] = pad(tokenized['input_ids'], tokenizer.eos_token_id)
tokenized['attention_mask'] = pad(tokenized['attention_mask'], 0)
tokenized['labels'] = pad(tokenized['labels'], -100)
convert_to_int(tokenized)
return chunks(tokenized)
data['batch_encodings'] = data[message_column].apply(process)
def transform_data(logger, tokenizer, data, block_size):
start_time = time.time()
data_new = data[[user_id_column, message_column]].copy()
append_insep(data_new, tokenizer)
data_new = concat(data_new)
process_data(data_new, tokenizer, block_size)
logger.info("--- %s seconds ---" % (time.time() - start_time))
return data_new
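# Stack each user's list of block-level BatchEncodings into a [users, blocks] table;
# users with fewer blocks are padded with None, which is filtered out downstream
# when hulm batching is disabled.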
def group_data(data, max_blocks, logger):
batch = pd.DataFrame(data.batch_encodings.tolist())
actual_blocks = len(batch.columns)
logger.info('************** Total Number of blocks = {} *************'.format(len(batch.columns)))
if max_blocks is not None and len(batch.columns) > max_blocks:
batch = batch[range(max_blocks)]
logger.info('************ Trimmed Number of blocks = {} *************'.format(len(batch.columns)))
return batch.to_numpy().tolist(), actual_blocks
def load_dataset(logger, tokenizer, table, block_size, max_blocks, data_args, data_type, disable_hulm_batching):
fields = get_fields()
if 'pkl' in table:
data = get_data_from_pkl(logger, table, fields, data_type)
elif 'csv' in table:
data = get_data_from_csv(logger, table, fields, data_type)
else:
data = get_data_from_db(logger, table, data_args, data_type)
data = transform_data(logger, tokenizer, data, block_size)
logger.info('************** Block size = {} *************'.format(block_size))
if not disable_hulm_batching:
return group_data(data, max_blocks, logger)
else:
instances, uncut_num_blocks = group_data(data, max_blocks, logger)
flat_list = [item for sublist in instances for item in sublist if item is not None]
return flat_list, uncut_num_blocks
| 7,382 | 44.018293 | 170 | py |
HaRT | HaRT-main/data/utils_hart/ft_doc_disable_hulm_batching_data_utils.py | import time
import math
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from transformers import BatchEncoding
user_id_column = 'user_id'
message_column = 'message'
label_column = 'label'
def get_fields(data_args):
if data_args.task_name is not None:
if data_args.task_name=='stance':
return {
'select_fields': [user_id_column, 'message_id', message_column, 'stance', 'timestamp'],
'transform_data_fields': [user_id_column, message_column, 'stance'],
'label_field': 'stance'
}
elif data_args.task_name=='sentiment':
return {
'select_fields': [user_id_column, 'message_id', message_column, label_column],
'transform_data_fields': [user_id_column, message_column, label_column],
'label_field': label_column
}
else:
return {
'select_fields': [user_id_column, message_column, label_column],
'transform_data_fields': [user_id_column, message_column, label_column],
'label_field': label_column
}
def get_conn(data_args):
myDB = URL(drivername='mysql', host=data_args.hostname,
database=data_args.db, query={'read_default_file': '~/.my.cnf', 'charset': 'utf8mb4'})
engine = create_engine(myDB, encoding='latin1')
conn = engine.connect()
return conn
def get_data_from_db(logger, table, fields, data_args, data_type):
logger.info("Getting data from table:{} in {} database".format(table, data_args.db))
conn = get_conn(data_args)
select_clause = 'select ' + ', '.join(fields['select_fields']) + ' from ' + table
where_clause = ' where user_id not in (select user_id from sentiment_dev_users)' \
if data_args.task_name=='sentiment' and data_type=='train' \
else \
' where user_id in (select user_id from sentiment_dev_users)' \
if data_args.task_name=='sentiment' and data_type=='dev' \
else ''
limit_clause = '' #if not __debug__ else ' limit 10'
stmt = select_clause + where_clause + limit_clause
results = conn.execute(stmt)
data = pd.DataFrame(results.fetchall())
data.columns = results.keys()
data = data[data.message.notnull()]
conn.close()
return data
def get_data_from_csv(logger, csv_file, fields, data_type):
logger.info("Getting data from {} data pickle file:{}".format(data_type, csv_file))
data = pd.read_csv(csv_file)
data.sort_values(by=fields['order_by_fields'], inplace=True)
data.reset_index(drop=True, inplace=True)
return data
def get_data_from_pkl(logger, pkl_file, fields, data_type):
logger.info("Getting data from {} data pickle file:{}".format(data_type, pkl_file))
data = pd.read_pickle(pkl_file)
data.sort_values(by=fields['order_by_fields'], inplace=True)
data.reset_index(drop=True, inplace=True)
return data
def map_labels_to_int_value(data, label_field):
labels_map = {
'positive': 1,
'negative': -1,
'neutral': 0,
'objective-OR-neutral': 0,
'objective': 0,
None: float('NaN'),
}
data[label_field] = data[label_field].map(lambda x : labels_map[x])
def tokenize_with_labels(data, label_field, tokenizer, data_args):
def tokenize(data):
return tokenizer(data)
def process(data):
# get the input_ids (i.e., token_ids) and the attention_mask
# attention_mask is not altered since it's required to attend to all tokens.
be = tokenize(data['message'])
if (data_args.task_name == 'stance' or data_args.task_name == 'sentiment') and not math.isnan(data[label_field]):
be['labels'] = int(data[label_field]) + 1
elif not math.isnan(data[label_field]):
be['labels'] = int(data[label_field])
return be
data['tokenized'] = data.apply(process, axis=1)
def normalize(data):
normalized = pd.json_normalize(data['tokenized'])
data = pd.concat([data, normalized], axis=1)
return data
def pad_and_chunk(data, tokenizer, block_size):
def pad_or_truncate(data, pad_value):
if len(data) > block_size:
data = data[0:block_size]
else:
multiplier = (block_size - len(data))%block_size
data.extend([pad_value]*multiplier)
return data
def chunks(data):
i_values = data['input_ids']
a_values = data['attention_mask']
l_values = data['labels']
return [BatchEncoding(dict(input_ids = i_values,
attention_mask=a_values,
labels = l_values))
]
def process(data):
data['input_ids'] = pad_or_truncate(data['input_ids'], tokenizer.eos_token_id)
data['attention_mask'] = pad_or_truncate(data['attention_mask'], 0)
return chunks(data)
data['batch_encodings'] = data.apply(process, axis=1)
def transform_data(logger, tokenizer, data, fields, block_size, data_args):
start_time = time.time()
data_new = data[fields['transform_data_fields']].copy()
    tokenize_with_labels(data_new, fields['label_field'], tokenizer, data_args)
data_new = normalize(data_new)
pad_and_chunk(data_new, tokenizer, block_size)
logger.info("--- %s seconds ---" % (time.time() - start_time))
return data_new
def group_data(data, max_blocks, logger):
batch = pd.DataFrame(data.batch_encodings.tolist())
actual_blocks = len(batch.columns)
logger.info('************** Total Number of blocks = {} *************'.format(len(batch.columns)))
logger.info('************** Total Number of instances = {} *************'.format(batch.shape[0]))
if max_blocks is not None and len(batch.columns) > max_blocks:
batch = batch[range(max_blocks)]
logger.info('************ Trimmed Number of blocks = {} *************'.format(len(batch.columns)))
return batch.to_numpy().tolist(), actual_blocks
def load_dataset(logger, tokenizer, table, block_size, max_blocks, data_args, data_type, disable_hulm_batching):
fields = get_fields(data_args)
if 'pkl' in table:
data = get_data_from_pkl(logger, table, fields, data_type)
elif 'csv' in table:
data = get_data_from_csv(logger, table, fields, data_type)
else:
data = get_data_from_db(logger, table, fields, data_args, data_type)
if data_args.task_name=='sentiment':
map_labels_to_int_value(data, fields['label_field'])
data = transform_data(logger, tokenizer, data, fields, block_size, data_args)
logger.info('************** Block size = {} *************'.format(block_size))
if not disable_hulm_batching:
return group_data(data, max_blocks, logger)
else:
instances, uncut_num_blocks = group_data(data, max_blocks, logger)
logger.info('************** Total number of instances = {} *************'.format(len(instances)))
flat_list = [item for sublist in instances for item in sublist if item is not None]
return flat_list, uncut_num_blocks
| 7,206 | 39.717514 | 121 | py |
HaRT | HaRT-main/data/utils_hart/ft_doc_data_utils.py | import time
import math
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from transformers import BatchEncoding
user_id_column = 'user_id'
message_column = 'message'
order_by_fields = [user_id_column, 'updated_time']
label_column = 'label'
def get_fields(data_args):
if data_args.task_name is not None:
if data_args.task_name=='stance':
return {
'select_fields': [user_id_column, 'message_id', message_column, 'stance', 'timestamp'],
'order_by_fields': [user_id_column, 'message_id'],
'transform_data_fields': [user_id_column, message_column, 'stance'],
'label_field': 'stance'
}
elif data_args.task_name=='sentiment':
return {
'select_fields': [user_id_column, 'message_id', message_column, label_column],
'order_by_fields': [user_id_column, 'message_id'],
'transform_data_fields': [user_id_column, message_column, label_column],
'label_field': label_column
}
else:
return {
'select_fields': [user_id_column, message_column, label_column],
'order_by_fields': order_by_fields,
'transform_data_fields': [user_id_column, message_column, label_column],
'label_field': label_column
}
def get_conn(data_args):
myDB = URL(drivername='mysql', host=data_args.hostname,
database=data_args.db, query={'read_default_file': '~/.my.cnf', 'charset': 'utf8mb4'})
engine = create_engine(myDB, encoding='latin1')
conn = engine.connect()
return conn
def get_data_from_db(logger, table, fields, data_args, data_type):
logger.info("Getting data from table:{} in {} database".format(table, data_args.db))
conn = get_conn(data_args)
select_clause = 'select ' + ', '.join(fields['select_fields']) + ' from ' + table
where_clause = ' where user_id not in (select user_id from sentiment_dev_users)' \
if data_args.task_name=='sentiment' and data_type=='train' \
else \
' where user_id in (select user_id from sentiment_dev_users)' \
if data_args.task_name=='sentiment' and data_type=='dev' \
else ''
order_clause = ' order by ' + ', '.join(fields['order_by_fields'])
limit_clause = '' #if not __debug__ else ' limit 10000'
stmt = select_clause + where_clause + order_clause + limit_clause
results = conn.execute(stmt)
data = pd.DataFrame(results.fetchall())
data.columns = results.keys()
data = data[data.message.notnull()]
conn.close()
return data
def get_data_from_csv(logger, csv_file, fields, data_type):
logger.info("Getting data from {} data pickle file:{}".format(data_type, csv_file))
data = pd.read_csv(csv_file)
data.sort_values(by=fields['order_by_fields'], inplace=True)
data.reset_index(drop=True, inplace=True)
return data
def get_data_from_pkl(logger, pkl_file, fields, data_type):
logger.info("Getting data from {} data pickle file:{}".format(data_type, pkl_file))
data = pd.read_pickle(pkl_file)
data.sort_values(by=fields['order_by_fields'], inplace=True)
data.reset_index(drop=True, inplace=True)
return data
def map_labels_to_int_value(data, label_field):
labels_map = {
'positive': 1,
'negative': -1,
'neutral': 0,
'objective-OR-neutral': 0,
'objective': 0,
None: float('NaN'),
'': float('NaN')
}
data[label_field] = data[label_field].map(labels_map)
def append_insep(data, tokenizer):
data[message_column] = data[message_column] + tokenizer.sep_token
def tokenize_with_labels(data, label_field, tokenizer, data_args):
def tokenize(data):
return tokenizer(data)
def process(data):
# get the input_ids (i.e., token_ids) and the attention_mask
# attention_mask is not altered since it's required to attend to all tokens.
be = tokenize(data[message_column])
# create the labels of size len(input_ids) and mark all as -100 so that they don't
# contribute to the loss calculation
be['labels'] = [-100] * len(be['input_ids'])
# except, when the current msg is associated with a label
# mark the last token before the separator token as the actual label for stance.
# this token will be used to predict (i.e., classify into) the label.
if (data_args.task_name == 'stance' or data_args.task_name == 'sentiment') and not math.isnan(data[label_field]):
be['labels'][-2] = int(data[label_field]) + 1
elif not math.isnan(data[label_field]):
be['labels'][-2] = int(data[label_field])
return be
data['tokenized'] = data.apply(process, axis=1)
def normalize_and_concat(data):
normalized = pd.json_normalize(data['tokenized'])
data = pd.concat([data, normalized], axis=1)
return data.groupby(user_id_column).agg({'input_ids': 'sum', 'attention_mask':'sum', 'labels':'sum'}).reset_index()
def pad_and_chunk(data, tokenizer, block_size):
def pad(data, pad_value):
multiplier = (block_size - len(data))%block_size
data.extend([pad_value]*multiplier)
return data
def chunks(data):
i_values = data['input_ids']
a_values = data['attention_mask']
l_values = data['labels']
return [BatchEncoding(dict(input_ids = i_values[x:x+block_size],
attention_mask=a_values[x:x+block_size], labels = l_values[x:x+block_size]))
for x in range(0, len(i_values), block_size)]
def process(data):
data['input_ids'] = pad(data['input_ids'], tokenizer.eos_token_id)
data['attention_mask'] = pad(data['attention_mask'], 0)
data['labels'] = pad(data['labels'], -100)
return chunks(data)
data['batch_encodings'] = data.apply(process, axis=1)
def transform_data(logger, tokenizer, data, fields, block_size, data_args):
start_time = time.time()
data_new = data[fields['transform_data_fields']].copy()
append_insep(data_new, tokenizer)
tokenize_with_labels(data_new, fields['label_field'], tokenizer, data_args)
data_new = normalize_and_concat(data_new)
pad_and_chunk(data_new, tokenizer, block_size)
logger.info("--- %s seconds ---" % (time.time() - start_time))
return data_new
def group_data(data, max_blocks, logger):
batch = pd.DataFrame(data.batch_encodings.tolist())
actual_blocks = len(batch.columns)
logger.info('************** Total Number of blocks = {} *************'.format(len(batch.columns)))
if max_blocks is not None and len(batch.columns) > max_blocks:
batch = batch[range(max_blocks)]
logger.info('************ Trimmed Number of blocks = {} *************'.format(len(batch.columns)))
return batch.to_numpy().tolist(), actual_blocks
def load_dataset(logger, tokenizer, table, block_size, max_blocks, data_args, data_type, disable_hulm_batching):
fields = get_fields(data_args)
if 'pkl' in table:
data = get_data_from_pkl(logger, table, fields, data_type)
elif 'csv' in table:
data = get_data_from_csv(logger, table, fields, data_type)
else:
data = get_data_from_db(logger, table, fields, data_args, data_type)
if data_args.task_name=='sentiment':
map_labels_to_int_value(data, fields['label_field'])
data = transform_data(logger, tokenizer, data, fields, block_size, data_args)
logger.info('************** Block size = {} *************'.format(block_size))
if not disable_hulm_batching:
return group_data(data, max_blocks, logger)
else:
instances, uncut_num_blocks = group_data(data, max_blocks, logger)
flat_list = [item for sublist in instances for item in sublist if item is not None]
return flat_list, uncut_num_blocks | 8,031 | 42.652174 | 121 | py |
HaRT | HaRT-main/args/clm_args.py | from dataclasses import dataclass, field
from typing import Optional
from transformers import MODEL_FOR_CAUSAL_LM_MAPPING
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
use_qh05_wts: bool = field(
default=False,
metadata={
"help": "Whether to use (at 'layer_ins') pretrained query, key, value weights followed by"
"query weights (for concatenated query and history) initialized with 0.5 mean, instead of,"
"newly initialized query (for concatenated hidden states and history) and key weights"}
)
instantiate_hart: bool = field(
default=False, metadata={"help": "Whether to use a local instance of model config or not."}
)
add_history: bool = field(
default=False, metadata={"help": "Whether to use history (and history recurrence) or not."}
)
initial_history: Optional[str] = field(default=None, metadata={"help": "A .pt file containing a reasonable initial history embedding as a pytorch tensor."})
layer_ins: Optional[int] = field(
default=None,
metadata={
"help": "If add_history is True, layer_ins tells at which layer the history should be addded (inserted)."
},
)
extract_layer: Optional[int] = field(
default=11,
metadata={
"help": "If add_history is True, extract_layer tells which layer's output should be used for updating history."
},
)
output_block_last_hidden_states: bool = field(
default=False, metadata={"help": "Whether to output last hidden-states of the model's blocks at the output of last layer for each block or not."}
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
search_params: bool = field(
default=False, metadata={"help": "To enable Hyperparameters search using ``optuna`` or ``Ray Tune``"}
)
use_ray: bool = field(
default=False, metadata={"help": "To enable Hyperparameters search using ``Ray Tune``"}
)
use_optuna: bool = field(
default=False, metadata={"help": "To enable Hyperparameters search using ``optuna``"}
)
num_trials: Optional[int] = field(
default=5,
metadata={
"help": "Number of trials to run when 'search_params' is true."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_table: Optional[str] = field(
default=None,
metadata={"help": "The input training data table (a mysql database table)."})
dev_table: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data table to evaluate the perplexity on. (a mysql database table)."},
)
test_table: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data table to evaluate the perplexity on. (a mysql database table)."},
)
db: Optional[str] = field(
default=None,
metadata={"help": "The database where input training data table resides. (a mysql database)."})
hostname: Optional[str] = field(
default=None,
metadata={"help": "The host name or IP where the (mysql) database resides."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
num_users_for_optuna: Optional[int] = field(
default=5000,
metadata={
"help": "For hyperparameter search, truncate the number of training users to this "
"value if set."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_val_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
},
)
max_train_blocks: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training blocks to this "
"value if set."
},
)
max_val_blocks: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation blocks to this "
"value if set."
},
)
block_size: Optional[int] = field(
default=None,
metadata={
"help": "Optional input block sequence length after tokenization "
"(batched into instances of max_train_blocks/max_val_blocks , each of size block_size"
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
disable_hulm_batching: bool = field(
default=False, metadata={"help": "Batch the dataset as a flat list ([users, blocks * block_size]) instead of hulm style batching, i.e., [users, blocks, block_size] dimensions."}
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
def __post_init__(self):
if self.train_table is None and self.dev_table is None and self.test_table is None and self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a train/validation(dev or test) database table, dataset name or a training/validation file.")
else:
if (self.train_table is not None or self.dev_table is not None or self.test_table is not None) and (self.db is None or self.hostname is None):
raise ValueError("Need database and hostname/IP if providing a train/val(dev or test) mysql tables.")
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
| 9,140 | 43.808824 | 185 | py |
HaRT | HaRT-main/args/ft_args.py | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_type: Optional[str] = field(
default=None,
metadata={"help": "The type of task to train on: 'document' or 'user' -level"},
)
task_name: Optional[str] = field(
default=None,
metadata={"help": "The name of the task to train on: 'stance', 'sentiment', 'age', 'ope', or 'ner'"},
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
return_entity_level_metrics: bool = field(
default=False, metadata={"help": "NER return entity level metrics or not"}
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
use_history_output: bool = field(
default=False, metadata={"help": "Should use the history output from Ar_HuLM for FT tasks predictions (regression/user-level tasks mainly) or not."}
)
save_preds_labels: bool = field(
default=False, metadata={"help": "Should save the predictions and labels into text files or not."}
)
num_labels: Optional[int] = field(
default=None,
metadata={
"help": "Number of classification labels when fine tuning a 'document' type task."
},
)
train_table: Optional[str] = field(
default=None,
metadata={"help": "The input training data table in a csv or pickle file (path to the file)."})
dev_table: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data table in a csv or pickle file (path to the file) to validate the model during training."},
)
test_table: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data table in a csv or pickle file (path to the file) to evaluate the trained model for perplexity."},
)
db: Optional[str] = field(
default=None,
metadata={"help": "The database where input training data table resides. (a mysql database)."}
)
hostname: Optional[str] = field(
default=None,
metadata={"help": "The host name or IP where the (mysql) database resides."}
)
max_train_blocks: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training blocks to this "
"value if set."
},
)
max_val_blocks: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation blocks to this "
"value if set."
},
)
block_size: Optional[int] = field(
default=1024,
metadata={
"help": "Optional input block sequence length after tokenization "
"(batched into instances of max_train_blocks/max_val_blocks , each of size block_size"
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
disable_hulm_batching: bool = field(
default=False, metadata={"help": "Batch the dataset as a flat list ([users, blocks * block_size]) instead of hulm style batching, i.e., [users, blocks, block_size] dimensions."}
)
agg_type: Optional[str] = field(
default=None,
metadata={
"help": "One of 'last', 'sum', 'avg', 'masked_last', 'masked_avg', 'masked_sum'"
"When using user_states/history for downstream tasks, what kind of "
"user_states/history aggregation to use. Currently, used only when saving states for users."
}
)
train_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input training data pickle file."})
train_hist_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input training users' historical data pickle file."})
dev_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input dev data pickle file."})
dev_hist_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input dev users' historical data pickle file."})
test_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input test data pickle file."})
test_hist_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input test users' historical data pickle file."})
def __post_init__(self):
if self.task_type is None or (self.task_type != 'user' and self.task_type != 'document'):
raise ValueError("Need to define task type as one of 'document' or 'user'")
if self.num_labels is None:
raise ValueError('num_labels required to fine-tune downstream tasks!')
if self.train_table is None and (self.dev_table is None and self.test_table is None):
raise ValueError("Need a training/validation (dev or test) table.")
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
init_seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of model initialization."})
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
use_qh05_wts: bool = field(
default=False,
metadata={
"help": "Whether to use (at 'layer_ins') pretrained query, key, value weights followed by"
"query weights (for concatenated query and history) initialized with 0.5 mean, instead of,"
"newly initialized query (for concatenated hidden states and history) and key weights"
}
)
use_hart_no_hist: bool = field(
default=False,
metadata={"help": "Whether to use HaRT model with no available historcal context."},
)
freeze_model: bool = field(
default=False, metadata={"help": "Freeze the transformer module of the model. Train only classification layer."}
)
load_non_PT_hulm_model: bool = field(
default=False, metadata={"help": "Whether to use a non-pretrained hulm model or not"}
)
add_history: bool = field(
default=False, metadata={"help": "Whether to use history (and history recurrence) or not."}
)
initial_history: Optional[str] = field(
default=None, metadata={"help": "A .pt file containing a reasonable initial history embedding as a pytorch tensor."}
)
#TODO: following args should ideally be a part of training_args
metric_for_early_stopping: Optional[str] = field(
default=None, metadata={"help": "The metric to use to compare two different models."}
)
early_stopping_patience: int = field(
default=3,
metadata={
"help": "To be used with `metric_for_early_stopping`."
"To stop training when the specified `metric_for_early_stopping` worsens for"
"`early_stopping_patience` evaluation calls."
}
)
early_stopping_threshold: Optional[float] = field(
default=0.0,
metadata={
"help": "Use with `metric_for_early_stopping` and `early_stopping_patience` to denote how"
"much the specified metric must improve to satisfy early stopping conditions."
}
)
search_params: bool = field(
default=False, metadata={"help": "To enable Hyperparameters search using ``optuna`` or ``Ray Tune``"}
)
use_ray: bool = field(
default=False, metadata={"help": "To enable Hyperparameters search using ``Ray Tune``"}
)
use_optuna: bool = field(
default=False, metadata={"help": "To enable Hyperparameters search using ``optuna``"}
)
num_trials: Optional[int] = field(
default=10,
metadata={
"help": "Number of trials to run when 'search_params' is true."
},
)
| 10,704 | 41.82 | 185 | py |
covid-19_timeSeriesAnalysis | covid-19_timeSeriesAnalysis-master/diagnosticPlotsForLinearRegression_likeR.py | # Functions for the 4 diagnostic plots (like R)
# Code adapted from https://towardsdatascience.com/going-from-r-to-python-linear-regression-diagnostic-plots-144d1c4aa5a
from statsmodels.nonparametric.smoothers_lowess import lowess
import scipy.stats as stats
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
'''
The input parameter 'results' for all functions should come from the following piece of code:
import statsmodels.formula.api as smf
model = smf.ols(formula='<dependent_variable> ~ <independent_variable>', data=<data frame>)
results = model.fit()
'''
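# Illustrative usage sketch (assumes a pandas DataFrame `df` with columns 'y' and 'x';
# these names are placeholders, not part of this module):
#   import statsmodels.formula.api as smf
#   results = smf.ols(formula='y ~ x', data=df).fit()
#   all4DiagnosticPlots(results)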
def all4DiagnosticPlots(results):
fig, axes = plt.subplots(2, 2, figsize=(10,10))
residualsVsFitted(results, axes[0, 0])
normalQ_Q(results, axes[0, 1])
scaleLocationPlot(results, axes[1, 0])
residualsVsLeverage(results, axes[1, 1])
def residualsVsFitted(results, axes=None):
residuals = results.resid
fitted = results.fittedvalues
smoothed = lowess(residuals,fitted)
top3 = abs(residuals).sort_values(ascending = False)[:3]
    if axes is None:
plt.rcParams.update({'font.size': 16})
plt.rcParams["figure.figsize"] = (8,7)
fig, ax = plt.subplots()
ax.scatter(fitted, residuals, edgecolors = 'k', facecolors = 'none')
ax.plot(smoothed[:,0],smoothed[:,1],color = 'r')
ax.set_ylabel('Residuals')
ax.set_xlabel('Fitted Values')
ax.set_title('Residuals vs. Fitted')
ax.plot([min(fitted),max(fitted)],[0,0],color = 'k',linestyle = ':', alpha = .3)
for i in top3.index:
ax.annotate(i,xy=(fitted[i],residuals[i]))
plt.show()
else:
axes.scatter(fitted, residuals, edgecolors = 'k', facecolors = 'none')
axes.plot(smoothed[:,0],smoothed[:,1],color = 'r')
axes.set_ylabel('Residuals')
axes.set_xlabel('Fitted Values')
axes.set_title('Residuals vs. Fitted')
axes.plot([min(fitted),max(fitted)],[0,0],color = 'k',linestyle = ':', alpha = .3)
for i in top3.index:
axes.annotate(i,xy=(fitted[i],residuals[i]))
def normalQ_Q(results, axes=None):
sorted_student_residuals = pd.Series(results.get_influence().resid_studentized_internal)
sorted_student_residuals.index = results.resid.index
sorted_student_residuals = sorted_student_residuals.sort_values(ascending = True)
df = pd.DataFrame(sorted_student_residuals)
df.columns = ['sorted_student_residuals']
df['theoretical_quantiles'] = stats.probplot(df['sorted_student_residuals'], dist = 'norm', fit = False)[0]
rankings = abs(df['sorted_student_residuals']).sort_values(ascending = False)
top3 = rankings[:3]
x = df['theoretical_quantiles']
y = df['sorted_student_residuals']
    if axes is None:
fig, ax = plt.subplots()
ax.scatter(x,y, edgecolor = 'k',facecolor = 'none')
ax.set_title('Normal Q-Q')
ax.set_ylabel('Standardized Residuals')
ax.set_xlabel('Theoretical Quantiles')
ax.plot([np.min([x,y]),np.max([x,y])],[np.min([x,y]),np.max([x,y])], color = 'r', ls = '--')
for val in top3.index:
ax.annotate(val,xy=(df['theoretical_quantiles'].loc[val],df['sorted_student_residuals'].loc[val]))
plt.show()
else:
axes.scatter(x,y, edgecolor = 'k',facecolor = 'none')
axes.set_title('Normal Q-Q')
axes.set_ylabel('Standardized Residuals')
axes.set_xlabel('Theoretical Quantiles')
axes.plot([np.min([x,y]),np.max([x,y])],[np.min([x,y]),np.max([x,y])], color = 'r', ls = '--')
for val in top3.index:
axes.annotate(val,xy=(df['theoretical_quantiles'].loc[val],df['sorted_student_residuals'].loc[val]))
def scaleLocationPlot(results, axes=None):
fitted = results.fittedvalues
student_residuals = results.get_influence().resid_studentized_internal
sqrt_student_residuals = pd.Series(np.sqrt(np.abs(student_residuals)))
sqrt_student_residuals.index = results.resid.index
smoothed = lowess(sqrt_student_residuals,fitted)
top3 = abs(sqrt_student_residuals).sort_values(ascending = False)[:3]
    if axes is None:
fig, ax = plt.subplots()
ax.scatter(fitted, sqrt_student_residuals, edgecolors = 'k', facecolors = 'none')
ax.plot(smoothed[:,0],smoothed[:,1],color = 'r')
        ax.set_ylabel(r'$\sqrt{|Studentized \ Residuals|}$')
ax.set_xlabel('Fitted Values')
ax.set_title('Scale-Location')
ax.set_ylim(0,max(sqrt_student_residuals)+0.1)
for i in top3.index:
ax.annotate(i,xy=(fitted[i],sqrt_student_residuals[i]))
plt.show()
else:
axes.scatter(fitted, sqrt_student_residuals, edgecolors = 'k', facecolors = 'none')
axes.plot(smoothed[:,0],smoothed[:,1],color = 'r')
        axes.set_ylabel(r'$\sqrt{|Studentized \ Residuals|}$')
axes.set_xlabel('Fitted Values')
axes.set_title('Scale-Location')
axes.set_ylim(0,max(sqrt_student_residuals)+0.1)
for i in top3.index:
axes.annotate(i,xy=(fitted[i],sqrt_student_residuals[i]))
def residualsVsLeverage(results, axes=None):
student_residuals = pd.Series(results.get_influence().resid_studentized_internal)
student_residuals.index = results.resid.index
df = pd.DataFrame(student_residuals)
df.columns = ['student_residuals']
df['leverage'] = results.get_influence().hat_matrix_diag
smoothed = lowess(df['student_residuals'],df['leverage'])
sorted_student_residuals = abs(df['student_residuals']).sort_values(ascending = False)
top3 = sorted_student_residuals[:3]
x = df['leverage']
y = df['student_residuals']
xpos = max(x)+max(x)*0.01
    if axes is None:
fig, ax = plt.subplots()
ax.scatter(x, y, edgecolors = 'k', facecolors = 'none')
ax.plot(smoothed[:,0],smoothed[:,1],color = 'r')
ax.set_ylabel('Studentized Residuals')
ax.set_xlabel('Leverage')
ax.set_title('Residuals vs. Leverage')
ax.set_ylim(min(y)-min(y)*0.15,max(y)+max(y)*0.15)
ax.set_xlim(-0.01,max(x)+max(x)*0.05)
plt.tight_layout()
for val in top3.index:
ax.annotate(val,xy=(x.loc[val],y.loc[val]))
cooksx = np.linspace(min(x), xpos, 50)
p = len(results.params)
poscooks1y = np.sqrt((p*(1-cooksx))/cooksx)
poscooks05y = np.sqrt(0.5*(p*(1-cooksx))/cooksx)
negcooks1y = -np.sqrt((p*(1-cooksx))/cooksx)
negcooks05y = -np.sqrt(0.5*(p*(1-cooksx))/cooksx)
ax.plot(cooksx,poscooks1y,label = "Cook's Distance", ls = ':', color = 'r')
ax.plot(cooksx,poscooks05y, ls = ':', color = 'r')
ax.plot(cooksx,negcooks1y, ls = ':', color = 'r')
ax.plot(cooksx,negcooks05y, ls = ':', color = 'r')
ax.plot([0,0],ax.get_ylim(), ls=":", alpha = .3, color = 'k')
ax.plot(ax.get_xlim(), [0,0], ls=":", alpha = .3, color = 'k')
ax.annotate('1.0', xy = (xpos, poscooks1y[-1]), color = 'r')
ax.annotate('0.5', xy = (xpos, poscooks05y[-1]), color = 'r')
ax.annotate('1.0', xy = (xpos, negcooks1y[-1]), color = 'r')
ax.annotate('0.5', xy = (xpos, negcooks05y[-1]), color = 'r')
ax.legend()
plt.show()
else:
axes.scatter(x, y, edgecolors = 'k', facecolors = 'none')
axes.plot(smoothed[:,0],smoothed[:,1],color = 'r')
axes.set_ylabel('Studentized Residuals')
axes.set_xlabel('Leverage')
axes.set_title('Residuals vs. Leverage')
axes.set_ylim(min(y)-min(y)*0.15,max(y)+max(y)*0.15)
axes.set_xlim(-0.01,max(x)+max(x)*0.05)
plt.tight_layout()
for val in top3.index:
axes.annotate(val,xy=(x.loc[val],y.loc[val]))
cooksx = np.linspace(min(x), xpos, 50)
p = len(results.params)
poscooks1y = np.sqrt((p*(1-cooksx))/cooksx)
poscooks05y = np.sqrt(0.5*(p*(1-cooksx))/cooksx)
negcooks1y = -np.sqrt((p*(1-cooksx))/cooksx)
negcooks05y = -np.sqrt(0.5*(p*(1-cooksx))/cooksx)
axes.plot(cooksx,poscooks1y,label = "Cook's Distance", ls = ':', color = 'r')
axes.plot(cooksx,poscooks05y, ls = ':', color = 'r')
axes.plot(cooksx,negcooks1y, ls = ':', color = 'r')
axes.plot(cooksx,negcooks05y, ls = ':', color = 'r')
axes.plot([0,0],axes.get_ylim(), ls=":", alpha = .3, color = 'k')
axes.plot(axes.get_xlim(), [0,0], ls=":", alpha = .3, color = 'k')
axes.annotate('1.0', xy = (xpos, poscooks1y[-1]), color = 'r')
axes.annotate('0.5', xy = (xpos, poscooks05y[-1]), color = 'r')
axes.annotate('1.0', xy = (xpos, negcooks1y[-1]), color = 'r')
axes.annotate('0.5', xy = (xpos, negcooks05y[-1]), color = 'r')
axes.legend() | 8,774 | 45.184211 | 120 | py |
FJLT | FJLT-master/src/fjlt.py | import numpy as np
import fht #Fast hadamard transform. https://github.com/nbarbey/fht
from scipy import sparse
import numpy.random as npr
import math
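# Fast Johnson-Lindenstrauss Transform: each d-dimensional column of A is embedded into
# k dimensions by applying Phi = P * H * D, where D randomly flips signs, H is a (fast)
# Walsh-Hadamard transform and P is a sparse random projection.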
def approx_bound(eps, n):
return int(2 / eps ** 2 * math.log(n) + 1.0)
def fast_sample(n, sample_size):
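    # Partial Fisher-Yates shuffle: draws sample_size distinct indices from range(n)
    # without materializing the full permutation.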
swap_records = {}
sample_wor = np.empty(sample_size, dtype=int)
    for i in range(sample_size):
rand_ix = npr.randint(i, n)
if i in swap_records:
el1 = swap_records[i]
else:
el1 = i
if rand_ix in swap_records:
el2 = swap_records[rand_ix]
else:
el2 = rand_ix
swap_records[rand_ix] = el1
sample_wor[i] = el2
if i in swap_records:
del swap_records[i]
return sample_wor
def nextPow(d_act):
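    # Round d_act up to the next power of two (bit-twiddling) so the fast Hadamard
    # transform can be applied to the zero-padded columns.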
d_act = d_act - 1
d_act |= d_act >> 1
d_act |= d_act >> 2
d_act |= d_act >> 4
d_act |= d_act >> 8
d_act |= d_act >> 16
d_act += 1
return d_act
def fjlt(A, k, q):
(d, n) = A.shape
#Calculate the next power of 2
d_act = nextPow(d)
sc_ft = np.sqrt(d_act / float(d * k))
    #Calculate D plus some constants
D = npr.randint(0, 2, size=(d, 1)) * 2 * sc_ft - sc_ft
DA = np.zeros((d_act, n))
DA[0:d, :] = A * D
    #Apply the Hadamard transform to each column (axis 0)
hda = np.apply_along_axis(fht.fht, 0, DA)
#Apply P transform
sample_size = npr.binomial(k * d, q)
indc = fast_sample(k * d, sample_size)
p_rows, p_cols = np.unravel_index(indc, (k, d))
p_data = npr.normal(loc=0, scale=math.sqrt(1/q), size=len(p_rows))
P = sparse.csr_matrix((p_data, (p_rows, p_cols)), shape=(k, d_act))
return P.dot(hda)
def fjlt_usp(A, k):
(d, n) = A.shape
#Calculate the next power of 2
d_act = nextPow(d)
sc_ft = np.sqrt(d_act / float(d * k))
    #Calculate D plus some constants
D = npr.randint(0, 2, size=(d, 1)) * 2 * sc_ft - sc_ft
DA = np.zeros((d_act, n))
DA[0:d, :] = A * D
    #Apply the Hadamard transform to each column (axis 0)
hda = np.apply_along_axis(fht.fht, 0, DA)
#Apply P transform
p_cols = fast_sample(d, k)
p_rows = np.array(range(k))
p_data = npr.randint(0, 2, size=k) * 2 - 1
P = sparse.csr_matrix((p_data, (p_rows, p_cols)), shape=(k, d_act))
return P.dot(hda)
| 2,319 | 25.666667 | 71 | py |
exatn | exatn-master/src/exatn/tests/hubbard.py | import numpy as np
from openfermion import *
import openfermion.ops as ops
import openfermion.hamiltonians as hams
import openfermion.transforms as trans
import openfermion.linalg as linalg
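# 2x2 Fermi-Hubbard lattice Hamiltonian with tunneling t=1 and on-site interaction U=1
# (arguments: x_dimension, y_dimension, tunneling, coulomb).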
hubb = hams.fermi_hubbard(2,2,1,1)
hubb_jw = trans.jordan_wigner(hubb)
print(hubb_jw)
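# Dense matrix of the Hamiltonian and its exact ground state (energy, state vector)
# computed from the sparse operator.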
hubb_matrix = linalg.get_sparse_operator(hubb).A
ground_state = linalg.get_ground_state(linalg.get_sparse_operator(hubb))
print(ground_state)
spectrum = linalg.eigenspectrum(hubb)
print(spectrum)
| 475 | 25.444444 | 72 | py |
exatn | exatn-master/python/exatn.py | from _pyexatn import *
import argparse
import os
import sys
def parse_args(args):
parser = argparse.ArgumentParser(description="ExaTN Python Framework.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
fromfile_prefix_chars='@')
parser.add_argument("-L", "--location", action='store_true',
help="Print the path to the ExaTN install location.", required=False)
opts = parser.parse_args(args)
return opts
Initialize()
def _finalize():
Finalize()
import atexit
atexit.register(_finalize)
def main(argv=None):
opts = parse_args(sys.argv[1:])
exatnLocation = os.path.dirname(os.path.realpath(__file__))
if opts.location:
print(exatnLocation)
sys.exit(0)
if __name__ == "__main__":
sys.exit(main())
| 836 | 26.9 | 93 | py |
exatn | exatn-master/python/examples/large_circuit.py | import sys
from pathlib import Path
sys.path.insert(1, str(Path.home()) + '/.exatn')
import exatn, numpy as np
qzero = np.array([1.0, 0.0], dtype=complex)
hadamard = np.array([[1., 1.],[1., -1.]], dtype=complex)
nQubits = 10
[exatn.createTensor('Q'+str(i), qzero) for i in range(nQubits)]
exatn.createTensor('H', hadamard)
exatn.registerTensorIsometry('H', [0], [1])
tensorCounter = 1
circuit = exatn.TensorNetwork('QuantumCircuit')
[circuit.appendTensor(tensorCounter+c, 'Q'+str(i)) for i,c in enumerate(range(nQubits))]
tensorCounter += nQubits
qubitReg = exatn.TensorNetwork(circuit)
qubitReg.rename('QubitKet')
[circuit.appendTensorGate(tensorCounter+c, 'H', [i]) for i, c in enumerate(range(nQubits))]
tensorCounter += nQubits
circuit.printIt()
inverse = exatn.TensorNetwork(circuit)
inverse.rename('InverseCircuit')
[inverse.appendTensorGate(tensorCounter+c, 'H', [nQubits - i - 1], True) for i,c in enumerate(range(nQubits))]
tensorCounter += nQubits
assert(inverse.collapseIsometries())
inverse.printIt()
bra = qubitReg
bra.conjugate()
bra.rename('QubitBra')
pairings = [[i,i] for i in range(nQubits)]
inverse.appendTensorNetwork(bra, pairings)
inverse.printIt()
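# With all H/H+ isometry pairs collapsed, the closed network reduces to the scalar
# <0...0|0...0> = 1, so the rank-0 evaluation below should print 1.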
assert(inverse.getRank() == 0)
assert(exatn.evaluate(inverse))
print(exatn.getLocalTensor(inverse.getTensor(0).getName())) | 1,309 | 25.2 | 110 | py |
exatn | exatn-master/python/examples/test_exatn_interface_data_from_numpy_dot_product.py | import exatn, numpy as np
# Create some numpy arrays
s = np.random.rand(2)
r = np.random.rand(2)
# Create the ExaTN tensors
exatn.createTensor("Z0", 0.0)
exatn.createTensor("S", s)
exatn.createTensor("R", r)
# Print S, R
exatn.print("S")
exatn.print("R")
# Demonstrate transformTensor interface by
# negating the tensor data in S
def negate(data):
data *= -1.
exatn.transformTensor("S", negate)
exatn.print("S")
# Compute the dot product, store in Z0
exatn.evaluateTensorNetwork('MyTN', 'Z0() = S(a) * R(a)')
exatn.print('Z0')
z0_data = exatn.getLocalTensor('Z0')
# Compare to what numpy would have gotten
print(np.dot(-s,r))
print('z0 = ', z0_data)
print(exatn.getLocalTensor('S'))
print(exatn.getLocalTensor('S').shape)
# Clean up and destroy
exatn.destroyTensor("S")
exatn.destroyTensor("R")
exatn.destroyTensor("Z0")
| 833 | 20.384615 | 57 | py |
exatn | exatn-master/python/examples/benchmark.py | import sys, os, re, time
from pathlib import Path
sys.path.insert(1, str(Path.home()) + '/.exatn')
import exatn
import numpy as np
dir_path = os.path.dirname(os.path.realpath(__file__))
inputFile = open(dir_path + '/resources/test-cases.txt', 'r')
count = 0
# Returns an array of tensor dimension from its string expression
# and a map of indices to dimensions
def getTensorDimArray(tensorStr, varDimMap):
dimStr = tensorStr[2:-1]
dimVars = dimStr.split(",")
result = []
for var in dimVars:
result.append(int(varDimMap[var]))
return result
while True:
count += 1
gFlops = 2.0/1e9
# Get next line from file
line = inputFile.readline()
# if line is empty
# end of file is reached
if not line:
break
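    # Each test-case line is expected to look like (format inferred from the parsing below):
    #   C[a,b] = A[a,c] * B[c,b] & a:64; c:64; b:64;
    # i.e. a contraction expression plus the size of every index variable.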
expr, dimVars = line.strip().split(" & ")
# Remove the last ';'
dimVars = dimVars[:-1]
dimVarList = dimVars.split("; ")
vardict = {}
for var in dimVarList:
varName, varVal = var.split(":")
vardict[varName] = varVal
gFlops *= float(varVal)
exprSplits = re.split(' = | * ', expr)
rhsTensor = exprSplits[0]
lhsTensorOperand1 = exprSplits[1]
lhsTensorOperand2 = exprSplits[3]
#print("{}:{}:{}".format(rhsTensor, lhsTensorOperand1, lhsTensorOperand2))
# LHS (result) 'C' tensor
assert(rhsTensor[0] == "C")
exatn.createTensor("C", getTensorDimArray(rhsTensor, vardict), 0.0)
# RHS A tensor
assert(lhsTensorOperand1[0] == "A")
exatn.createTensor("A", getTensorDimArray(lhsTensorOperand1, vardict), 0.0)
# Initialize random tensor body
exatn.initTensorRnd("A")
# RHS B tensor
assert(lhsTensorOperand2[0] == "B")
exatn.createTensor("B", getTensorDimArray(lhsTensorOperand2, vardict), 0.0)
# Initialize random tensor body
exatn.initTensorRnd("B")
# Convert [] to () (ExaTN convention)
exatnExpr = expr.replace('[','(').replace(']',')')
# Evaluate by tensor contraction
start = time.process_time()
contractOk = exatn.contractTensors(exatnExpr)
elapsedTime = time.process_time() - start
assert(contractOk is True)
# Destroy tensors
exatn.destroyTensor("A")
exatn.destroyTensor("B")
exatn.destroyTensor("C")
# Calc. GFlops/sec
gFlops = gFlops/elapsedTime
print("Test {}: {} ({}) || Time elapsed: {} [sec]; GFlops/sec: {}".format(count, expr, dimVars, elapsedTime, gFlops))
# Compare with numpy
sizeA = getTensorDimArray(lhsTensorOperand1, vardict)
sizeB = getTensorDimArray(lhsTensorOperand2, vardict)
A = np.empty(sizeA, dtype = np.float64)
B = np.empty(sizeB, dtype = np.float64)
randA = np.random.randn(*A.shape)
randB = np.random.randn(*B.shape)
indA = lhsTensorOperand1[2:-1]
indB = lhsTensorOperand2[2:-1]
indC = rhsTensor[2:-1]
npStart = time.process_time()
# Use numpy einsum to perform tensor contraction
C_ = np.einsum("%s,%s->%s"%(indA.replace(',',''), indB.replace(',',''), indC.replace(',','')), randA, randB)
    npElapsedTime = time.process_time() - npStart
print(" ==> Test {}: Numpy time elapsed: {} [sec]; ExaTN speed-up: {}".format(count, npElapsedTime, npElapsedTime/elapsedTime))
inputFile.close()
| 3,245 | 32.463918 | 133 | py |
exatn | exatn-master/python/examples/contraction.py | import sys
from pathlib import Path
sys.path.insert(1, str(Path.home()) + '/.exatn')
import exatn, numpy as np
def test_exatn():
a1 = np.array([
[1., 0., 0.],
[0., 1., 1.]])
print('A1 shape: ',a1.shape)
b1 = np.array([
[ 1., 0., 3., 0.],
[ 1., 1., 2., 2.],
[-1., 1., -2., 2.]])
print('B1 shape: ',b1.shape)
exatn.createTensor('C1', [2, 4], 0.0)
exatn.createTensor('A1', a1.copy(order='F'))
exatn.createTensor('B1', b1.copy(order='F'))
exatn.contractTensors('C1(a,c)=A1(a,b)*B1(b,c)',1.0)
c1 = exatn.getLocalTensor('C1')
print('C1 shape: ',c1.shape)
d1 = np.dot(a1, b1)
print('D1 shape: ',d1.shape)
print('NumPy result c1 = a1 * b1:\n', d1)
print('ExaTN result c1 = a1 * b1:\n', c1)
exatn.destroyTensor('B1')
exatn.destroyTensor('A1')
exatn.destroyTensor('C1')
test_exatn()
| 897 | 22.631579 | 56 | py |
exatn | exatn-master/python/examples/simple.py | import sys
from pathlib import Path
sys.path.insert(1, str(Path.home()) + '/.exatn')
import exatn
exatn.createTensor('Z0')
exatn.createTensor('T0', [2,2], .01)
exatn.createTensor('T1', [2,2,2], .01)
exatn.createTensor('T2', [2,2], .01)
exatn.createTensor('H0', [2,2,2,2], .01)
exatn.createTensor('S0', [2,2], .01)
exatn.createTensor('S1', [2,2,2], .01)
exatn.createTensor('S2', [2,2], .01)
exatn.evaluateTensorNetwork('{0,1} 3-site MPS closure', 'Z0() = T0(a,b) * T1(b,c,d) * T2(d,e) * H0(a,c,f,g) * S0(f,h) * S1(h,g,i) * S2(i,e)')
z0 = exatn.getLocalTensor('Z0')
assert(abs(z0 - 5.12e-12) < 1e-12)
print(z0) | 611 | 31.210526 | 141 | py |
exatn | exatn-master/python/examples/hamiltonian.py | import sys
from pathlib import Path
sys.path.insert(1, str(Path.home()) + '/.exatn')
import exatn, numpy as np
# Declare MPS tensors:
exatn.createTensor('Q0', [2,2], 1e-2)
exatn.createTensor('Q1', [2,2,4], 1e-2)
exatn.createTensor('Q2', [4,2,2], 1e-2)
exatn.createTensor('Q3', [2,2], 1e-2)
# Declare Hamiltonian Tensors
exatn.createTensor('H01', [2,2,2,2], 1e-2)
exatn.createTensor('H12', [2,2,2,2], 1e-2)
exatn.createTensor('H23', [2,2,2,2], 1e-2)
exatn.createTensor('Z0', [2,2,2,2], 1e-2)
# Get them as exatn.Tensor
q0 = exatn.getTensor('Q0')
q1 = exatn.getTensor('Q1')
q2 = exatn.getTensor('Q2')
q3 = exatn.getTensor('Q3')
h01 = exatn.getTensor('H01')
h12 = exatn.getTensor('H12')
h23 = exatn.getTensor('H23')
z0 = exatn.getTensor('Z0')
# Declare the Hamiltonian Operator
ham = exatn.TensorOperator('Hamiltonian')
ham.appendComponent(h01, [[0,0],[1,1]], [[0,2],[1,3]], 1.0)
ham.appendComponent(h12, [[1,0],[2,1]], [[1,2],[2,3]], 1.0)
ham.appendComponent(h23, [[2,0],[3,1]], [[2,2],[3,3]], 1.0)
# Declare the ket MPS tensor network:
# Q0----Q1----Q2----Q3
# | | | |
mps_ket = exatn.TensorNetwork('MPS', 'Z0(i0,i1,i2,i3)+=Q0(i0,j0)*Q1(j0,i1,j1)*Q2(j1,i2,j2)*Q3(j2,i3)', {'Z0':z0, 'Q0':q0, 'Q1':q1, 'Q2':q2, 'Q3':q3})
# Declare the ket tensor network expansion:
# Q0----Q1----Q2----Q3
# | | | |
ket = exatn.TensorExpansion()
ket.appendComponent(mps_ket, 1.0)
ket.rename('MPSket')
# Declare the bra tensor network expansion (conjugated ket):
# | | | |
# Q0----Q1----Q2----Q3
bra = exatn.TensorExpansion(ket)
bra.conjugate()
bra.rename('MPSbra')
# Declare the operator times ket product tensor expansion:
# Q0----Q1----Q2----Q3 Q0----Q1----Q2----Q3 Q0----Q1----Q2----Q3
# | | | | | | | | | | | |
# ==H01== | | + | ==H12== | + | | ==H23==
# | | | | | | | | | | | |
ham_ket = exatn.TensorExpansion(ket, ham)
ham_ket.rename('HamMPSket')
# Declare the full closed product tensor expansion (scalar):
# Q0----Q1----Q2----Q3 Q0----Q1----Q2----Q3 Q0----Q1----Q2----Q3
# | | | | | | | | | | | |
# ==H01== | | + | ==H12== | + | | ==H23== => AC0()
# | | | | | | | | | | | |
# Q0----Q1----Q2----Q3 Q0----Q1----Q2----Q3 Q0----Q1----Q2----Q3
closed_prod = exatn.TensorExpansion(ham_ket, bra)
closed_prod.rename('MPSbraHamMPSket')
closed_prod.printIt()
# Declare the derivative tensor expansion with respect to tensor Q1+:
# Q0----Q1----Q2----Q3 Q0----Q1----Q2----Q3 Q0----Q1----Q2----Q3
# | | | | | | | | | | | |
# ==H01== | | + | ==H12== | + | | ==H23==
# | | | | | | | | | | | |
# Q0-- --Q2----Q3 Q0-- --Q2----Q3 Q0-- --Q2----Q3
deriv_q1 = exatn.TensorExpansion(closed_prod, 'Q1', True)
deriv_q1.rename('DerivativeQ1')
# Create the Accumulator tensor for the closed tensor expansion:
exatn.createTensor('AC0')
accumulator0 = exatn.getTensor('AC0')
exatn.createTensor('AC1',[2,2,4], 0.0)
accumulator1 = exatn.getTensor('AC1')
# Evaluate the expectation value:
exatn.evaluate(closed_prod, accumulator0)
# Evaluate the derivative of the expectation value w.r.t. tensor Q1:
exatn.evaluate(deriv_q1, accumulator1)
# Print the expectation values
print(exatn.getLocalTensor('AC0'))
[print(exatn.getLocalTensor(c.network.getTensor(0).getName())) for c in closed_prod] | 3,592 | 37.223404 | 149 | py |
exatn | exatn-master/python/examples/circuit_conjugate.py | import sys
from pathlib import Path
sys.path.insert(1, str(Path.home()) + '/.exatn')
import exatn, numpy as np
qzero = np.array([1.0, 0.0], dtype=complex)
unitary = np.reshape(np.array([1,-1j,-1j,1], dtype=complex), (2,2))
exatn.createTensor('Q0', qzero)
exatn.createTensor('U', unitary)
exatn.registerTensorIsometry('U', [0], [1])
circuit = exatn.TensorNetwork('QuantumCircuit')
circuit.appendTensor(1, 'Q0')
circuit.appendTensorGate(2, 'U', [0])
circuit.printIt()
conj_circuit = exatn.TensorNetwork(circuit)
conj_circuit.rename('ConjugatedCircuit')
conj_circuit.conjugate()
conj_circuit.printIt()
assert(exatn.evaluate(circuit))
assert(exatn.evaluate(conj_circuit))
print(exatn.getLocalTensor(circuit.getTensor(0).getName()))
print(exatn.getLocalTensor(conj_circuit.getTensor(0).getName()))
| 800 | 26.62069 | 67 | py |
exatn | exatn-master/python/examples/tensor_basic.py | import sys
from pathlib import Path
sys.path.insert(1, str(Path.home()) + '/.exatn')
import exatn
import numpy as np
# Demonstrate simple tensor network manipulation
exatn.createTensor('X', [2, 2], 0)
exatn.createTensor('Y', [2, 2], 0)
exatn.createTensor('Z', [2, 2], 0)
exatn.initTensorRnd('X')
exatn.initTensorRnd('Y')
exatn.initTensorRnd('Z')
tNet = exatn.TensorNetwork('test')
tNet.appendTensor(1, 'X')
tNet.appendTensor(2, 'Y')
tNet.appendTensor(3, 'Z')
# print tensor network
tNet.printIt()
tNetOriginal = exatn.TensorNetwork(tNet)
# Merge X and Y
pattern = tNet.mergeTensors(1, 2, 4)
print("After merge:")
tNet.printIt()
# Print the generic merge pattern
print(pattern)
# Create the merged tensor
pattern = pattern.replace("D", tNet.getTensor(4).getName())
pattern = pattern.replace("L", "X")
pattern = pattern.replace("R", "Y")
print(pattern)
# Perform calculation
exatn.createTensor(tNet.getTensor(4))
exatn.contractTensors(pattern)
# Evaluate the tensor network (after merging two tensors)
exatn.evaluate(tNet)
# Print root tensor
root = exatn.getLocalTensor(tNet.getTensor(0).getName())
print(root)
# Evaluate the *Original* network to make sure it is the same.
tNetOriginal.printIt()
exatn.evaluate(tNetOriginal)
rootOriginal = exatn.getLocalTensor(tNetOriginal.getTensor(0).getName())
print(rootOriginal)
assert(np.allclose(root, rootOriginal))
| 1,367 | 24.333333 | 72 | py |
exatn | exatn-master/python/examples/hypercontraction.py | import exatn, numpy as np
def test_exatn():
a1 = np.random.randn(2,3,4)
print('A1 shape: ',a1.shape)
b1 = np.random.randn(2,3,4,5)
print('B1 shape: ',b1.shape)
exatn.createTensor('C1', [3, 4, 5], 0.0)
exatn.createTensor('A1', a1.copy(order='F'))
exatn.createTensor('B1', b1.copy(order='F'))
exatn.contractTensors('C1(b,c,d)=A1(a,b,c)*B1(a,b,c,d)',1.0)
c1 = exatn.getLocalTensor('C1')
print('C1 shape: ',c1.shape)
print('ExaTN result c1 = a1 * b1:\n', c1)
d1 = np.einsum('abc, abcd->bcd', a1, b1)
print('D1 shape: ',d1.shape)
print('NumPy result c1 = a1 * b1:\n', d1)
exatn.destroyTensor('B1')
exatn.destroyTensor('A1')
exatn.destroyTensor('C1')
test_exatn()
| 735 | 23.533333 | 64 | py |
exatn | exatn-master/python/examples/test_creation_from_array_print_and_transform.py | import exatn, numpy as np
exatn.createTensor("Sx", np.array([[0.,1.],[1.,0.]]))
exatn.createTensor("Sy", np.array([[0.,-1.j],[1.j,0.]]))
exatn.print("Sx")
exatn.print("Sy")
def negate(data):
data *= -1.
exatn.transformTensor("Sx", negate)
exatn.print("Sx")
exatn.transformTensor("Sx", negate)
exatn.print("Sx")
exatn.transformTensor("Sy", negate)
exatn.print("Sy")
| 374 | 18.736842 | 56 | py |
exatn | exatn-master/python/examples/quantum_circuit_network.py | import sys
from pathlib import Path
sys.path.insert(1, str(Path.home()) + '/.exatn')
import exatn, numpy as np
#Quantum Circuit:
#Q0----H---------
#Q1----H----C----
#Q2----H----N----
#Define the initial qubit state vector:
qzero = np.array([1.0, 0.0], dtype=complex)
hadamard = np.array([[1., 1.],[1., -1.]], dtype=complex)
cnot = np.reshape(np.array([[1, 0, 0, 0],[0, 1, 0, 0],[0, 0, 0, 1],[0, 0, 1, 0]], dtype=complex), (2,2,2,2))
exatn.createTensor('Q0', qzero)
exatn.createTensor('Q1', qzero)
exatn.createTensor('Q2', qzero)
exatn.createTensor('H', hadamard)
exatn.createTensor('CNOT', cnot)
exatn.registerTensorIsometry('H', [0], [1])
exatn.registerTensorIsometry('CNOT', [0,1], [2,3])
circuit = exatn.TensorNetwork('QuantumCircuit')
circuit.appendTensor(1, 'Q0')
circuit.appendTensor(2, 'Q1')
circuit.appendTensor(3, 'Q2')
circuit.appendTensorGate(4, 'H', [0])
circuit.appendTensorGate(5, 'H', [1])
circuit.appendTensorGate(6, 'H', [2])
circuit.appendTensorGate(7, 'CNOT', [1,2])
circuit.printIt()
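# Build the inverse circuit: append the same gates again in reverse order with the conjugate flag set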
inverse = exatn.TensorNetwork(circuit)
inverse.rename('InverseCircuit')
inverse.appendTensorGate(8, 'CNOT', [1,2], True)
inverse.appendTensorGate(9, 'H', [2], True)
inverse.appendTensorGate(10, 'H', [1], True)
inverse.appendTensorGate(11, 'H', [0], True)
assert(inverse.collapseIsometries())
inverse.printIt()
assert(exatn.evaluate(circuit)) | 1,359 | 25.666667 | 108 | py |
RAFT | RAFT-master/evaluate.py | import sys
sys.path.append('core')
from PIL import Image
import argparse
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import datasets
from utils import flow_viz
from utils import frame_utils
from raft import RAFT
from utils.utils import InputPadder, forward_interpolate
@torch.no_grad()
def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'):
""" Create submission for the Sintel leaderboard """
model.eval()
for dstype in ['clean', 'final']:
test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype)
flow_prev, sequence_prev = None, None
for test_id in range(len(test_dataset)):
image1, image2, (sequence, frame) = test_dataset[test_id]
if sequence != sequence_prev:
flow_prev = None
padder = InputPadder(image1.shape)
image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True)
flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
if warm_start:
flow_prev = forward_interpolate(flow_low[0])[None].cuda()
output_dir = os.path.join(output_path, dstype, sequence)
output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frame_utils.writeFlow(output_file, flow)
sequence_prev = sequence
@torch.no_grad()
def create_kitti_submission(model, iters=24, output_path='kitti_submission'):
""" Create submission for the Sintel leaderboard """
model.eval()
test_dataset = datasets.KITTI(split='testing', aug_params=None)
if not os.path.exists(output_path):
os.makedirs(output_path)
for test_id in range(len(test_dataset)):
image1, image2, (frame_id, ) = test_dataset[test_id]
padder = InputPadder(image1.shape, mode='kitti')
image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
_, flow_pr = model(image1, image2, iters=iters, test_mode=True)
flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
output_filename = os.path.join(output_path, frame_id)
frame_utils.writeFlowKITTI(output_filename, flow)
@torch.no_grad()
def validate_chairs(model, iters=24):
""" Perform evaluation on the FlyingChairs (test) split """
model.eval()
epe_list = []
val_dataset = datasets.FlyingChairs(split='validation')
for val_id in range(len(val_dataset)):
image1, image2, flow_gt, _ = val_dataset[val_id]
image1 = image1[None].cuda()
image2 = image2[None].cuda()
_, flow_pr = model(image1, image2, iters=iters, test_mode=True)
epe = torch.sum((flow_pr[0].cpu() - flow_gt)**2, dim=0).sqrt()
epe_list.append(epe.view(-1).numpy())
epe = np.mean(np.concatenate(epe_list))
print("Validation Chairs EPE: %f" % epe)
return {'chairs': epe}
@torch.no_grad()
def validate_sintel(model, iters=32):
""" Peform validation using the Sintel (train) split """
model.eval()
results = {}
for dstype in ['clean', 'final']:
val_dataset = datasets.MpiSintel(split='training', dstype=dstype)
epe_list = []
for val_id in range(len(val_dataset)):
image1, image2, flow_gt, _ = val_dataset[val_id]
image1 = image1[None].cuda()
image2 = image2[None].cuda()
padder = InputPadder(image1.shape)
image1, image2 = padder.pad(image1, image2)
flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
flow = padder.unpad(flow_pr[0]).cpu()
epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
epe_list.append(epe.view(-1).numpy())
epe_all = np.concatenate(epe_list)
epe = np.mean(epe_all)
px1 = np.mean(epe_all<1)
px3 = np.mean(epe_all<3)
px5 = np.mean(epe_all<5)
print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" % (dstype, epe, px1, px3, px5))
results[dstype] = np.mean(epe_list)
return results
@torch.no_grad()
def validate_kitti(model, iters=24):
""" Peform validation using the KITTI-2015 (train) split """
model.eval()
val_dataset = datasets.KITTI(split='training')
out_list, epe_list = [], []
for val_id in range(len(val_dataset)):
image1, image2, flow_gt, valid_gt = val_dataset[val_id]
image1 = image1[None].cuda()
image2 = image2[None].cuda()
padder = InputPadder(image1.shape, mode='kitti')
image1, image2 = padder.pad(image1, image2)
flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
flow = padder.unpad(flow_pr[0]).cpu()
epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
mag = torch.sum(flow_gt**2, dim=0).sqrt()
epe = epe.view(-1)
mag = mag.view(-1)
val = valid_gt.view(-1) >= 0.5
out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()
epe_list.append(epe[val].mean().item())
out_list.append(out[val].cpu().numpy())
epe_list = np.array(epe_list)
out_list = np.concatenate(out_list)
epe = np.mean(epe_list)
f1 = 100 * np.mean(out_list)
print("Validation KITTI: %f, %f" % (epe, f1))
return {'kitti-epe': epe, 'kitti-f1': f1}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', help="restore checkpoint")
parser.add_argument('--dataset', help="dataset for evaluation")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
args = parser.parse_args()
model = torch.nn.DataParallel(RAFT(args))
model.load_state_dict(torch.load(args.model))
model.cuda()
model.eval()
# create_sintel_submission(model.module, warm_start=True)
# create_kitti_submission(model.module)
with torch.no_grad():
if args.dataset == 'chairs':
validate_chairs(model.module)
elif args.dataset == 'sintel':
validate_sintel(model.module)
elif args.dataset == 'kitti':
validate_kitti(model.module)
| 6,618 | 32.429293 | 112 | py |
RAFT | RAFT-master/demo.py | import sys
sys.path.append('core')
import argparse
import os
import cv2
import glob
import numpy as np
import torch
from PIL import Image
from raft import RAFT
from utils import flow_viz
from utils.utils import InputPadder
DEVICE = 'cuda'
def load_image(imfile):
img = np.array(Image.open(imfile)).astype(np.uint8)
img = torch.from_numpy(img).permute(2, 0, 1).float()
return img[None].to(DEVICE)
def viz(img, flo):
img = img[0].permute(1,2,0).cpu().numpy()
flo = flo[0].permute(1,2,0).cpu().numpy()
# map flow to rgb image
flo = flow_viz.flow_to_image(flo)
img_flo = np.concatenate([img, flo], axis=0)
# import matplotlib.pyplot as plt
# plt.imshow(img_flo / 255.0)
# plt.show()
cv2.imshow('image', img_flo[:, :, [2,1,0]]/255.0)
cv2.waitKey()
def demo(args):
model = torch.nn.DataParallel(RAFT(args))
model.load_state_dict(torch.load(args.model))
model = model.module
model.to(DEVICE)
model.eval()
with torch.no_grad():
images = glob.glob(os.path.join(args.path, '*.png')) + \
glob.glob(os.path.join(args.path, '*.jpg'))
images = sorted(images)
for imfile1, imfile2 in zip(images[:-1], images[1:]):
image1 = load_image(imfile1)
image2 = load_image(imfile2)
padder = InputPadder(image1.shape)
image1, image2 = padder.pad(image1, image2)
flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
viz(image1, flow_up)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', help="restore checkpoint")
parser.add_argument('--path', help="dataset for evaluation")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
args = parser.parse_args()
demo(args)
| 2,073 | 26.289474 | 112 | py |
RAFT | RAFT-master/train.py | from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from raft import RAFT
import evaluate
import datasets
from torch.utils.tensorboard import SummaryWriter
try:
from torch.cuda.amp import GradScaler
except:
# dummy GradScaler for PyTorch < 1.6
class GradScaler:
def __init__(self):
pass
def scale(self, loss):
return loss
def unscale_(self, optimizer):
pass
def step(self, optimizer):
optimizer.step()
def update(self):
pass
# exclude extremely large displacements
MAX_FLOW = 400
SUM_FREQ = 100
VAL_FREQ = 5000
def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds)
flow_loss = 0.0
    # exclude invalid pixels and extremely large displacements
mag = torch.sum(flow_gt**2, dim=1).sqrt()
valid = (valid >= 0.5) & (mag < max_flow)
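    # Later refinement iterations get exponentially larger weights: gamma**(n_predictions - i - 1)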
for i in range(n_predictions):
i_weight = gamma**(n_predictions - i - 1)
i_loss = (flow_preds[i] - flow_gt).abs()
flow_loss += i_weight * (valid[:, None] * i_loss).mean()
epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
epe = epe.view(-1)[valid.view(-1)]
metrics = {
'epe': epe.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
}
return flow_loss, metrics
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100,
pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
return optimizer, scheduler
class Logger:
def __init__(self, model, scheduler):
self.model = model
self.scheduler = scheduler
self.total_steps = 0
self.running_loss = {}
self.writer = None
def _print_training_status(self):
metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0])
metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
# print the training status
print(training_str + metrics_str)
if self.writer is None:
self.writer = SummaryWriter()
for k in self.running_loss:
self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
self.running_loss[k] = 0.0
def push(self, metrics):
self.total_steps += 1
for key in metrics:
if key not in self.running_loss:
self.running_loss[key] = 0.0
self.running_loss[key] += metrics[key]
if self.total_steps % SUM_FREQ == SUM_FREQ-1:
self._print_training_status()
self.running_loss = {}
def write_dict(self, results):
if self.writer is None:
self.writer = SummaryWriter()
for key in results:
self.writer.add_scalar(key, results[key], self.total_steps)
def close(self):
self.writer.close()
def train(args):
model = nn.DataParallel(RAFT(args), device_ids=args.gpus)
print("Parameter Count: %d" % count_parameters(model))
if args.restore_ckpt is not None:
model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
model.cuda()
model.train()
if args.stage != 'chairs':
model.module.freeze_bn()
train_loader = datasets.fetch_dataloader(args)
optimizer, scheduler = fetch_optimizer(args, model)
total_steps = 0
scaler = GradScaler(enabled=args.mixed_precision)
logger = Logger(model, scheduler)
VAL_FREQ = 5000
add_noise = True
should_keep_training = True
while should_keep_training:
for i_batch, data_blob in enumerate(train_loader):
optimizer.zero_grad()
image1, image2, flow, valid = [x.cuda() for x in data_blob]
if args.add_noise:
stdv = np.random.uniform(0.0, 5.0)
image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
flow_predictions = model(image1, image2, iters=args.iters)
loss, metrics = sequence_loss(flow_predictions, flow, valid, args.gamma)
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
scaler.step(optimizer)
scheduler.step()
scaler.update()
logger.push(metrics)
if total_steps % VAL_FREQ == VAL_FREQ - 1:
PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
torch.save(model.state_dict(), PATH)
results = {}
for val_dataset in args.validation:
if val_dataset == 'chairs':
results.update(evaluate.validate_chairs(model.module))
elif val_dataset == 'sintel':
results.update(evaluate.validate_sintel(model.module))
elif val_dataset == 'kitti':
results.update(evaluate.validate_kitti(model.module))
logger.write_dict(results)
model.train()
if args.stage != 'chairs':
model.module.freeze_bn()
total_steps += 1
if total_steps > args.num_steps:
should_keep_training = False
break
logger.close()
PATH = 'checkpoints/%s.pth' % args.name
torch.save(model.state_dict(), PATH)
return PATH
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='raft', help="name your experiment")
parser.add_argument('--stage', help="determines which dataset to use for training")
parser.add_argument('--restore_ckpt', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--validation', type=str, nargs='+')
parser.add_argument('--lr', type=float, default=0.00002)
parser.add_argument('--num_steps', type=int, default=100000)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--gpus', type=int, nargs='+', default=[0,1])
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--iters', type=int, default=12)
parser.add_argument('--wdecay', type=float, default=.00005)
parser.add_argument('--epsilon', type=float, default=1e-8)
parser.add_argument('--clip', type=float, default=1.0)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
parser.add_argument('--add_noise', action='store_true')
args = parser.parse_args()
torch.manual_seed(1234)
np.random.seed(1234)
if not os.path.isdir('checkpoints'):
os.mkdir('checkpoints')
train(args) | 7,987 | 31.340081 | 103 | py |
RAFT | RAFT-master/core/corr.py | import torch
import torch.nn.functional as F
from utils.utils import bilinear_sampler, coords_grid
try:
import alt_cuda_corr
except:
# alt_cuda_corr is not compiled
pass
class CorrBlock:
def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
self.num_levels = num_levels
self.radius = radius
self.corr_pyramid = []
# all pairs correlation
corr = CorrBlock.corr(fmap1, fmap2)
batch, h1, w1, dim, h2, w2 = corr.shape
corr = corr.reshape(batch*h1*w1, dim, h2, w2)
self.corr_pyramid.append(corr)
for i in range(self.num_levels-1):
corr = F.avg_pool2d(corr, 2, stride=2)
self.corr_pyramid.append(corr)
def __call__(self, coords):
r = self.radius
coords = coords.permute(0, 2, 3, 1)
batch, h1, w1, _ = coords.shape
out_pyramid = []
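        # At each pyramid level, look up correlation values in a (2r+1)x(2r+1) window around every coordinate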
for i in range(self.num_levels):
corr = self.corr_pyramid[i]
dx = torch.linspace(-r, r, 2*r+1, device=coords.device)
dy = torch.linspace(-r, r, 2*r+1, device=coords.device)
delta = torch.stack(torch.meshgrid(dy, dx), axis=-1)
centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i
delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2)
coords_lvl = centroid_lvl + delta_lvl
corr = bilinear_sampler(corr, coords_lvl)
corr = corr.view(batch, h1, w1, -1)
out_pyramid.append(corr)
out = torch.cat(out_pyramid, dim=-1)
return out.permute(0, 3, 1, 2).contiguous().float()
@staticmethod
def corr(fmap1, fmap2):
batch, dim, ht, wd = fmap1.shape
fmap1 = fmap1.view(batch, dim, ht*wd)
fmap2 = fmap2.view(batch, dim, ht*wd)
corr = torch.matmul(fmap1.transpose(1,2), fmap2)
corr = corr.view(batch, ht, wd, 1, ht, wd)
return corr / torch.sqrt(torch.tensor(dim).float())
class AlternateCorrBlock:
def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
self.num_levels = num_levels
self.radius = radius
self.pyramid = [(fmap1, fmap2)]
for i in range(self.num_levels):
fmap1 = F.avg_pool2d(fmap1, 2, stride=2)
fmap2 = F.avg_pool2d(fmap2, 2, stride=2)
self.pyramid.append((fmap1, fmap2))
def __call__(self, coords):
coords = coords.permute(0, 2, 3, 1)
B, H, W, _ = coords.shape
dim = self.pyramid[0][0].shape[1]
corr_list = []
for i in range(self.num_levels):
r = self.radius
fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous()
fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous()
coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous()
corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r)
corr_list.append(corr.squeeze(1))
corr = torch.stack(corr_list, dim=1)
corr = corr.reshape(B, -1, H, W)
return corr / torch.sqrt(torch.tensor(dim).float())
| 3,085 | 32.543478 | 74 | py |
RAFT | RAFT-master/core/update.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class FlowHead(nn.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class ConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192+128):
super(ConvGRU, self).__init__()
self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
def forward(self, h, x):
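        # Convolutional GRU: z is the update gate, r the reset gate, q the candidate hidden state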
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz(hx))
r = torch.sigmoid(self.convr(hx))
q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
return h
class SepConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192+128):
super(SepConvGRU, self).__init__()
self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
def forward(self, h, x):
# horizontal
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz1(hx))
r = torch.sigmoid(self.convr1(hx))
q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
# vertical
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz2(hx))
r = torch.sigmoid(self.convr2(hx))
q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1)))
h = (1-z) * h + z * q
return h
class SmallMotionEncoder(nn.Module):
def __init__(self, args):
super(SmallMotionEncoder, self).__init__()
cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
self.conv = nn.Conv2d(128, 80, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = torch.cat([cor, flo], dim=1)
out = F.relu(self.conv(cor_flo))
return torch.cat([out, flow], dim=1)
class BasicMotionEncoder(nn.Module):
def __init__(self, args):
super(BasicMotionEncoder, self).__init__()
cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = torch.cat([cor, flo], dim=1)
out = F.relu(self.conv(cor_flo))
return torch.cat([out, flow], dim=1)
class SmallUpdateBlock(nn.Module):
def __init__(self, args, hidden_dim=96):
super(SmallUpdateBlock, self).__init__()
self.encoder = SmallMotionEncoder(args)
self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
self.flow_head = FlowHead(hidden_dim, hidden_dim=128)
def forward(self, net, inp, corr, flow):
motion_features = self.encoder(flow, corr)
inp = torch.cat([inp, motion_features], dim=1)
net = self.gru(net, inp)
delta_flow = self.flow_head(net)
return net, None, delta_flow
class BasicUpdateBlock(nn.Module):
def __init__(self, args, hidden_dim=128, input_dim=128):
super(BasicUpdateBlock, self).__init__()
self.args = args
self.encoder = BasicMotionEncoder(args)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64*9, 1, padding=0))
def forward(self, net, inp, corr, flow, upsample=True):
motion_features = self.encoder(flow, corr)
inp = torch.cat([inp, motion_features], dim=1)
net = self.gru(net, inp)
delta_flow = self.flow_head(net)
        # scale mask to balance gradients
mask = .25 * self.mask(net)
return net, mask, delta_flow
| 5,227 | 36.342857 | 87 | py |
RAFT | RAFT-master/core/extractor.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes)
self.norm2 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm3 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes)
self.norm2 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm3 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
if not stride == 1:
self.norm3 = nn.Sequential()
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x+y)
class BottleneckBlock(nn.Module):
def __init__(self, in_planes, planes, norm_fn='group', stride=1):
super(BottleneckBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)
self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
num_groups = planes // 8
if norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
if not stride == 1:
self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
elif norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(planes//4)
self.norm2 = nn.BatchNorm2d(planes//4)
self.norm3 = nn.BatchNorm2d(planes)
if not stride == 1:
self.norm4 = nn.BatchNorm2d(planes)
elif norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(planes//4)
self.norm2 = nn.InstanceNorm2d(planes//4)
self.norm3 = nn.InstanceNorm2d(planes)
if not stride == 1:
self.norm4 = nn.InstanceNorm2d(planes)
elif norm_fn == 'none':
self.norm1 = nn.Sequential()
self.norm2 = nn.Sequential()
self.norm3 = nn.Sequential()
if not stride == 1:
self.norm4 = nn.Sequential()
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)
def forward(self, x):
y = x
y = self.relu(self.norm1(self.conv1(y)))
y = self.relu(self.norm2(self.conv2(y)))
y = self.relu(self.norm3(self.conv3(y)))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x+y)
class BasicEncoder(nn.Module):
def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
super(BasicEncoder, self).__init__()
self.norm_fn = norm_fn
if self.norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
elif self.norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(64)
elif self.norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(64)
elif self.norm_fn == 'none':
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 64
self.layer1 = self._make_layer(64, stride=1)
self.layer2 = self._make_layer(96, stride=2)
self.layer3 = self._make_layer(128, stride=2)
# output convolution
self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)
self.dropout = None
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1):
layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# if input is list, combine batch dimension
is_list = isinstance(x, tuple) or isinstance(x, list)
if is_list:
batch_dim = x[0].shape[0]
x = torch.cat(x, dim=0)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv2(x)
if self.training and self.dropout is not None:
x = self.dropout(x)
if is_list:
x = torch.split(x, [batch_dim, batch_dim], dim=0)
return x
class SmallEncoder(nn.Module):
def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
super(SmallEncoder, self).__init__()
self.norm_fn = norm_fn
if self.norm_fn == 'group':
self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
elif self.norm_fn == 'batch':
self.norm1 = nn.BatchNorm2d(32)
elif self.norm_fn == 'instance':
self.norm1 = nn.InstanceNorm2d(32)
elif self.norm_fn == 'none':
self.norm1 = nn.Sequential()
self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = 32
self.layer1 = self._make_layer(32, stride=1)
self.layer2 = self._make_layer(64, stride=2)
self.layer3 = self._make_layer(96, stride=2)
self.dropout = None
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1):
layer1 = BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride)
layer2 = BottleneckBlock(dim, dim, self.norm_fn, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# if input is list, combine batch dimension
is_list = isinstance(x, tuple) or isinstance(x, list)
if is_list:
batch_dim = x[0].shape[0]
x = torch.cat(x, dim=0)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.conv2(x)
if self.training and self.dropout is not None:
x = self.dropout(x)
if is_list:
x = torch.split(x, [batch_dim, batch_dim], dim=0)
return x
| 8,847 | 32.014925 | 93 | py |
RAFT | RAFT-master/core/datasets.py | # Data loading based on https://github.com/NVIDIA/flownet2-pytorch
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F
import os
import math
import random
from glob import glob
import os.path as osp
from utils import frame_utils
from utils.augmentor import FlowAugmentor, SparseFlowAugmentor
class FlowDataset(data.Dataset):
def __init__(self, aug_params=None, sparse=False):
self.augmentor = None
self.sparse = sparse
if aug_params is not None:
if sparse:
self.augmentor = SparseFlowAugmentor(**aug_params)
else:
self.augmentor = FlowAugmentor(**aug_params)
self.is_test = False
self.init_seed = False
self.flow_list = []
self.image_list = []
self.extra_info = []
def __getitem__(self, index):
if self.is_test:
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
img1 = np.array(img1).astype(np.uint8)[..., :3]
img2 = np.array(img2).astype(np.uint8)[..., :3]
img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
return img1, img2, self.extra_info[index]
if not self.init_seed:
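            # Seed each dataloader worker once, using its worker id, so augmentations differ across workers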
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
torch.manual_seed(worker_info.id)
np.random.seed(worker_info.id)
random.seed(worker_info.id)
self.init_seed = True
index = index % len(self.image_list)
valid = None
if self.sparse:
flow, valid = frame_utils.readFlowKITTI(self.flow_list[index])
else:
flow = frame_utils.read_gen(self.flow_list[index])
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = np.array(flow).astype(np.float32)
img1 = np.array(img1).astype(np.uint8)
img2 = np.array(img2).astype(np.uint8)
# grayscale images
if len(img1.shape) == 2:
img1 = np.tile(img1[...,None], (1, 1, 3))
img2 = np.tile(img2[...,None], (1, 1, 3))
else:
img1 = img1[..., :3]
img2 = img2[..., :3]
if self.augmentor is not None:
if self.sparse:
img1, img2, flow, valid = self.augmentor(img1, img2, flow, valid)
else:
img1, img2, flow = self.augmentor(img1, img2, flow)
img1 = torch.from_numpy(img1).permute(2, 0, 1).float()
img2 = torch.from_numpy(img2).permute(2, 0, 1).float()
flow = torch.from_numpy(flow).permute(2, 0, 1).float()
if valid is not None:
valid = torch.from_numpy(valid)
else:
valid = (flow[0].abs() < 1000) & (flow[1].abs() < 1000)
return img1, img2, flow, valid.float()
def __rmul__(self, v):
self.flow_list = v * self.flow_list
self.image_list = v * self.image_list
return self
def __len__(self):
return len(self.image_list)
class MpiSintel(FlowDataset):
def __init__(self, aug_params=None, split='training', root='datasets/Sintel', dstype='clean'):
super(MpiSintel, self).__init__(aug_params)
flow_root = osp.join(root, split, 'flow')
image_root = osp.join(root, split, dstype)
if split == 'test':
self.is_test = True
for scene in os.listdir(image_root):
image_list = sorted(glob(osp.join(image_root, scene, '*.png')))
for i in range(len(image_list)-1):
self.image_list += [ [image_list[i], image_list[i+1]] ]
self.extra_info += [ (scene, i) ] # scene and frame_id
if split != 'test':
self.flow_list += sorted(glob(osp.join(flow_root, scene, '*.flo')))
class FlyingChairs(FlowDataset):
def __init__(self, aug_params=None, split='train', root='datasets/FlyingChairs_release/data'):
super(FlyingChairs, self).__init__(aug_params)
images = sorted(glob(osp.join(root, '*.ppm')))
flows = sorted(glob(osp.join(root, '*.flo')))
assert (len(images)//2 == len(flows))
split_list = np.loadtxt('chairs_split.txt', dtype=np.int32)
for i in range(len(flows)):
xid = split_list[i]
if (split=='training' and xid==1) or (split=='validation' and xid==2):
self.flow_list += [ flows[i] ]
self.image_list += [ [images[2*i], images[2*i+1]] ]
class FlyingThings3D(FlowDataset):
def __init__(self, aug_params=None, root='datasets/FlyingThings3D', dstype='frames_cleanpass'):
super(FlyingThings3D, self).__init__(aug_params)
for cam in ['left']:
for direction in ['into_future', 'into_past']:
image_dirs = sorted(glob(osp.join(root, dstype, 'TRAIN/*/*')))
image_dirs = sorted([osp.join(f, cam) for f in image_dirs])
flow_dirs = sorted(glob(osp.join(root, 'optical_flow/TRAIN/*/*')))
flow_dirs = sorted([osp.join(f, direction, cam) for f in flow_dirs])
for idir, fdir in zip(image_dirs, flow_dirs):
images = sorted(glob(osp.join(idir, '*.png')) )
flows = sorted(glob(osp.join(fdir, '*.pfm')) )
for i in range(len(flows)-1):
if direction == 'into_future':
self.image_list += [ [images[i], images[i+1]] ]
self.flow_list += [ flows[i] ]
elif direction == 'into_past':
self.image_list += [ [images[i+1], images[i]] ]
self.flow_list += [ flows[i+1] ]
class KITTI(FlowDataset):
def __init__(self, aug_params=None, split='training', root='datasets/KITTI'):
super(KITTI, self).__init__(aug_params, sparse=True)
if split == 'testing':
self.is_test = True
root = osp.join(root, split)
images1 = sorted(glob(osp.join(root, 'image_2/*_10.png')))
images2 = sorted(glob(osp.join(root, 'image_2/*_11.png')))
for img1, img2 in zip(images1, images2):
frame_id = img1.split('/')[-1]
self.extra_info += [ [frame_id] ]
self.image_list += [ [img1, img2] ]
if split == 'training':
self.flow_list = sorted(glob(osp.join(root, 'flow_occ/*_10.png')))
class HD1K(FlowDataset):
def __init__(self, aug_params=None, root='datasets/HD1k'):
super(HD1K, self).__init__(aug_params, sparse=True)
seq_ix = 0
while 1:
flows = sorted(glob(os.path.join(root, 'hd1k_flow_gt', 'flow_occ/%06d_*.png' % seq_ix)))
images = sorted(glob(os.path.join(root, 'hd1k_input', 'image_2/%06d_*.png' % seq_ix)))
if len(flows) == 0:
break
for i in range(len(flows)-1):
self.flow_list += [flows[i]]
self.image_list += [ [images[i], images[i+1]] ]
seq_ix += 1
def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'):
""" Create the data loader for the corresponding trainign set """
if args.stage == 'chairs':
aug_params = {'crop_size': args.image_size, 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True}
train_dataset = FlyingChairs(aug_params, split='training')
elif args.stage == 'things':
aug_params = {'crop_size': args.image_size, 'min_scale': -0.4, 'max_scale': 0.8, 'do_flip': True}
clean_dataset = FlyingThings3D(aug_params, dstype='frames_cleanpass')
final_dataset = FlyingThings3D(aug_params, dstype='frames_finalpass')
train_dataset = clean_dataset + final_dataset
elif args.stage == 'sintel':
aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.6, 'do_flip': True}
things = FlyingThings3D(aug_params, dstype='frames_cleanpass')
sintel_clean = MpiSintel(aug_params, split='training', dstype='clean')
sintel_final = MpiSintel(aug_params, split='training', dstype='final')
if TRAIN_DS == 'C+T+K+S+H':
kitti = KITTI({'crop_size': args.image_size, 'min_scale': -0.3, 'max_scale': 0.5, 'do_flip': True})
hd1k = HD1K({'crop_size': args.image_size, 'min_scale': -0.5, 'max_scale': 0.2, 'do_flip': True})
train_dataset = 100*sintel_clean + 100*sintel_final + 200*kitti + 5*hd1k + things
elif TRAIN_DS == 'C+T+K/S':
train_dataset = 100*sintel_clean + 100*sintel_final + things
elif args.stage == 'kitti':
aug_params = {'crop_size': args.image_size, 'min_scale': -0.2, 'max_scale': 0.4, 'do_flip': False}
train_dataset = KITTI(aug_params, split='training')
train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size,
pin_memory=False, shuffle=True, num_workers=4, drop_last=True)
print('Training with %d image pairs' % len(train_dataset))
return train_loader
| 9,245 | 38.177966 | 111 | py |
RAFT | RAFT-master/core/raft.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from update import BasicUpdateBlock, SmallUpdateBlock
from extractor import BasicEncoder, SmallEncoder
from corr import CorrBlock, AlternateCorrBlock
from utils.utils import bilinear_sampler, coords_grid, upflow8
try:
autocast = torch.cuda.amp.autocast
except:
# dummy autocast for PyTorch < 1.6
class autocast:
def __init__(self, enabled):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
class RAFT(nn.Module):
def __init__(self, args):
super(RAFT, self).__init__()
self.args = args
if args.small:
self.hidden_dim = hdim = 96
self.context_dim = cdim = 64
args.corr_levels = 4
args.corr_radius = 3
else:
self.hidden_dim = hdim = 128
self.context_dim = cdim = 128
args.corr_levels = 4
args.corr_radius = 4
if 'dropout' not in self.args:
self.args.dropout = 0
if 'alternate_corr' not in self.args:
self.args.alternate_corr = False
# feature network, context network, and update block
if args.small:
self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
else:
self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
def freeze_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
def initialize_flow(self, img):
""" Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
N, C, H, W = img.shape
coords0 = coords_grid(N, H//8, W//8, device=img.device)
coords1 = coords_grid(N, H//8, W//8, device=img.device)
# optical flow computed as difference: flow = coords1 - coords0
return coords0, coords1
def upsample_flow(self, flow, mask):
""" Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
N, _, H, W = flow.shape
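        # Each full-resolution flow value is a convex combination (softmax weights) of its 3x3 coarse-grid neighbors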
mask = mask.view(N, 1, 9, 8, 8, H, W)
mask = torch.softmax(mask, dim=2)
up_flow = F.unfold(8 * flow, [3,3], padding=1)
up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
up_flow = torch.sum(mask * up_flow, dim=2)
up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
return up_flow.reshape(N, 2, 8*H, 8*W)
def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
""" Estimate optical flow between pair of frames """
image1 = 2 * (image1 / 255.0) - 1.0
image2 = 2 * (image2 / 255.0) - 1.0
image1 = image1.contiguous()
image2 = image2.contiguous()
hdim = self.hidden_dim
cdim = self.context_dim
# run the feature network
with autocast(enabled=self.args.mixed_precision):
fmap1, fmap2 = self.fnet([image1, image2])
fmap1 = fmap1.float()
fmap2 = fmap2.float()
if self.args.alternate_corr:
corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
else:
corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
# run the context network
with autocast(enabled=self.args.mixed_precision):
cnet = self.cnet(image1)
net, inp = torch.split(cnet, [hdim, cdim], dim=1)
net = torch.tanh(net)
inp = torch.relu(inp)
coords0, coords1 = self.initialize_flow(image1)
if flow_init is not None:
coords1 = coords1 + flow_init
flow_predictions = []
for itr in range(iters):
coords1 = coords1.detach()
corr = corr_fn(coords1) # index correlation volume
flow = coords1 - coords0
with autocast(enabled=self.args.mixed_precision):
net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
# F(t+1) = F(t) + \Delta(t)
coords1 = coords1 + delta_flow
# upsample predictions
if up_mask is None:
flow_up = upflow8(coords1 - coords0)
else:
flow_up = self.upsample_flow(coords1 - coords0, up_mask)
flow_predictions.append(flow_up)
if test_mode:
return coords1 - coords0, flow_up
return flow_predictions
| 4,924 | 32.965517 | 102 | py |
RAFT | RAFT-master/core/__init__.py | 0 | 0 | 0 | py |
|
RAFT | RAFT-master/core/utils/utils.py | import torch
import torch.nn.functional as F
import numpy as np
from scipy import interpolate
class InputPadder:
""" Pads images such that dimensions are divisible by 8 """
def __init__(self, dims, mode='sintel'):
self.ht, self.wd = dims[-2:]
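        # Compute padding that rounds height and width up to the next multiple of 8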
pad_ht = (((self.ht // 8) + 1) * 8 - self.ht) % 8
pad_wd = (((self.wd // 8) + 1) * 8 - self.wd) % 8
if mode == 'sintel':
self._pad = [pad_wd//2, pad_wd - pad_wd//2, pad_ht//2, pad_ht - pad_ht//2]
else:
self._pad = [pad_wd//2, pad_wd - pad_wd//2, 0, pad_ht]
def pad(self, *inputs):
return [F.pad(x, self._pad, mode='replicate') for x in inputs]
def unpad(self,x):
ht, wd = x.shape[-2:]
c = [self._pad[2], ht-self._pad[3], self._pad[0], wd-self._pad[1]]
return x[..., c[0]:c[1], c[2]:c[3]]
def forward_interpolate(flow):
flow = flow.detach().cpu().numpy()
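    # Forward-warp the flow: move each vector to the pixel it points at, then fill holes by nearest-neighbor interpolation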
dx, dy = flow[0], flow[1]
ht, wd = dx.shape
x0, y0 = np.meshgrid(np.arange(wd), np.arange(ht))
x1 = x0 + dx
y1 = y0 + dy
x1 = x1.reshape(-1)
y1 = y1.reshape(-1)
dx = dx.reshape(-1)
dy = dy.reshape(-1)
valid = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
x1 = x1[valid]
y1 = y1[valid]
dx = dx[valid]
dy = dy[valid]
flow_x = interpolate.griddata(
(x1, y1), dx, (x0, y0), method='nearest', fill_value=0)
flow_y = interpolate.griddata(
(x1, y1), dy, (x0, y0), method='nearest', fill_value=0)
flow = np.stack([flow_x, flow_y], axis=0)
return torch.from_numpy(flow).float()
def bilinear_sampler(img, coords, mode='bilinear', mask=False):
""" Wrapper for grid_sample, uses pixel coordinates """
H, W = img.shape[-2:]
xgrid, ygrid = coords.split([1,1], dim=-1)
xgrid = 2*xgrid/(W-1) - 1
ygrid = 2*ygrid/(H-1) - 1
grid = torch.cat([xgrid, ygrid], dim=-1)
img = F.grid_sample(img, grid, align_corners=True)
if mask:
mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)
return img, mask.float()
return img
def coords_grid(batch, ht, wd, device):
coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device))
coords = torch.stack(coords[::-1], dim=0).float()
return coords[None].repeat(batch, 1, 1, 1)
def upflow8(flow, mode='bilinear'):
new_size = (8 * flow.shape[2], 8 * flow.shape[3])
return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
| 2,489 | 29 | 93 | py |
RAFT | RAFT-master/core/utils/augmentor.py | import numpy as np
import random
import math
from PIL import Image
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import torch
from torchvision.transforms import ColorJitter
import torch.nn.functional as F
class FlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True):
# spatial augmentation params
self.crop_size = crop_size
self.min_scale = min_scale
self.max_scale = max_scale
self.spatial_aug_prob = 0.8
self.stretch_prob = 0.8
self.max_stretch = 0.2
# flip augmentation params
self.do_flip = do_flip
self.h_flip_prob = 0.5
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14)
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
""" Photometric augmentation """
# asymmetric
if np.random.rand() < self.asymmetric_color_aug_prob:
img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8)
img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8)
# symmetric
else:
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2, bounds=[50, 100]):
""" Occlusion augmentation """
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(bounds[0], bounds[1])
dy = np.random.randint(bounds[0], bounds[1])
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
def spatial_transform(self, img1, img2, flow):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 8) / float(ht),
(self.crop_size[1] + 8) / float(wd))
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = scale
scale_y = scale
if np.random.rand() < self.stretch_prob:
scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_x = np.clip(scale_x, min_scale, None)
scale_y = np.clip(scale_y, min_scale, None)
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = flow * [scale_x, scale_y]
if self.do_flip:
if np.random.rand() < self.h_flip_prob: # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
if np.random.rand() < self.v_flip_prob: # v-flip
img1 = img1[::-1, :]
img2 = img2[::-1, :]
flow = flow[::-1, :] * [1.0, -1.0]
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0])
x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow
def __call__(self, img1, img2, flow):
img1, img2 = self.color_transform(img1, img2)
img1, img2 = self.eraser_transform(img1, img2)
img1, img2, flow = self.spatial_transform(img1, img2, flow)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
return img1, img2, flow
class SparseFlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False):
# spatial augmentation params
self.crop_size = crop_size
self.min_scale = min_scale
self.max_scale = max_scale
self.spatial_aug_prob = 0.8
self.stretch_prob = 0.8
self.max_stretch = 0.2
# flip augmentation params
self.do_flip = do_flip
self.h_flip_prob = 0.5
self.v_flip_prob = 0.1
# photometric augmentation params
self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14)
self.asymmetric_color_aug_prob = 0.2
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2):
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(50, 100)
dy = np.random.randint(50, 100)
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
ht, wd = flow.shape[:2]
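        # Sparse flow cannot be resized by interpolation; scatter the valid flow vectors onto the rescaled grid instead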
coords = np.meshgrid(np.arange(wd), np.arange(ht))
coords = np.stack(coords, axis=-1)
coords = coords.reshape(-1, 2).astype(np.float32)
flow = flow.reshape(-1, 2).astype(np.float32)
valid = valid.reshape(-1).astype(np.float32)
coords0 = coords[valid>=1]
flow0 = flow[valid>=1]
ht1 = int(round(ht * fy))
wd1 = int(round(wd * fx))
coords1 = coords0 * [fx, fy]
flow1 = flow0 * [fx, fy]
xx = np.round(coords1[:,0]).astype(np.int32)
yy = np.round(coords1[:,1]).astype(np.int32)
v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
xx = xx[v]
yy = yy[v]
flow1 = flow1[v]
flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
valid_img = np.zeros([ht1, wd1], dtype=np.int32)
flow_img[yy, xx] = flow1
valid_img[yy, xx] = 1
return flow_img, valid_img
def spatial_transform(self, img1, img2, flow, valid):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 1) / float(ht),
(self.crop_size[1] + 1) / float(wd))
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = np.clip(scale, min_scale, None)
scale_y = np.clip(scale, min_scale, None)
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
if self.do_flip:
if np.random.rand() < 0.5: # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
valid = valid[:, ::-1]
margin_y = 20
margin_x = 50
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)
y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow, valid
def __call__(self, img1, img2, flow, valid):
img1, img2 = self.color_transform(img1, img2)
img1, img2 = self.eraser_transform(img1, img2)
img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
valid = np.ascontiguousarray(valid)
return img1, img2, flow, valid
| 9,108 | 35.878543 | 97 | py |
RAFT | RAFT-master/core/utils/__init__.py | 0 | 0 | 0 | py |
|
RAFT | RAFT-master/core/utils/flow_viz.py | # Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization
# MIT License
#
# Copyright (c) 2018 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Tom Runia
# Date Created: 2018-08-03
import numpy as np
def make_colorwheel():
"""
Generates a color wheel for optical flow visualization as presented in:
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
Code follows the original C++ source code of Daniel Scharstein.
    Code follows the Matlab source code of Deqing Sun.
Returns:
np.ndarray: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros((ncols, 3))
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
col = col+RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
colorwheel[col:col+YG, 1] = 255
col = col+YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
col = col+GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
colorwheel[col:col+CB, 2] = 255
col = col+CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
col = col+BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
colorwheel[col:col+MR, 0] = 255
return colorwheel
def flow_uv_to_colors(u, v, convert_to_bgr=False):
"""
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
Args:
u (np.ndarray): Input horizontal flow of shape [H,W]
v (np.ndarray): Input vertical flow of shape [H,W]
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
colorwheel = make_colorwheel() # shape [55x3]
ncols = colorwheel.shape[0]
rad = np.sqrt(np.square(u) + np.square(v))
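    # Flow direction selects the position on the color wheel; flow magnitude controls the saturation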
a = np.arctan2(-v, -u)/np.pi
fk = (a+1) / 2*(ncols-1)
k0 = np.floor(fk).astype(np.int32)
k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
for i in range(colorwheel.shape[1]):
tmp = colorwheel[:,i]
col0 = tmp[k0] / 255.0
col1 = tmp[k1] / 255.0
col = (1-f)*col0 + f*col1
idx = (rad <= 1)
col[idx] = 1 - rad[idx] * (1-col[idx])
col[~idx] = col[~idx] * 0.75 # out of range
# Note the 2-i => BGR instead of RGB
ch_idx = 2-i if convert_to_bgr else i
flow_image[:,:,ch_idx] = np.floor(255 * col)
return flow_image
def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
"""
Expects a two dimensional flow image of shape.
Args:
flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
if clip_flow is not None:
flow_uv = np.clip(flow_uv, 0, clip_flow)
u = flow_uv[:,:,0]
v = flow_uv[:,:,1]
rad = np.sqrt(np.square(u) + np.square(v))
rad_max = np.max(rad)
epsilon = 1e-5
u = u / (rad_max + epsilon)
v = v / (rad_max + epsilon)
return flow_uv_to_colors(u, v, convert_to_bgr) | 4,318 | 31.719697 | 90 | py |
RAFT | RAFT-master/core/utils/frame_utils.py | import numpy as np
from PIL import Image
from os.path import *
import re
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
TAG_CHAR = np.array([202021.25], np.float32)
def readFlow(fn):
""" Read .flo file in Middlebury format"""
# Code adapted from:
# http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
# WARNING: this will work on little-endian architectures (eg Intel x86) only!
# print 'fn = %s'%(fn)
with open(fn, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
# print 'Reading %d x %d flo file\n' % (w, h)
data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
return np.resize(data, (int(h), int(w), 2))
def readPFM(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header == b'PF':
color = True
elif header == b'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data
def writeFlow(filename,uv,v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
nBands = 2
if v is None:
assert(uv.ndim == 3)
assert(uv.shape[2] == 2)
u = uv[:,:,0]
v = uv[:,:,1]
else:
u = uv
assert(u.shape == v.shape)
height,width = u.shape
f = open(filename,'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width*nBands))
tmp[:,np.arange(width)*2] = u
tmp[:,np.arange(width)*2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
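# Illustrative sketch (not part of the original file): round-trips a small flow field
# through the Middlebury .flo layout written by writeFlow and parsed by readFlow above,
# i.e. a 4-byte magic float (202021.25), int32 width, int32 height, then H*W interleaved
# (u, v) float32 pairs. The file name is a placeholder.
def _example_flo_roundtrip(path="example.flo"):
    flow = np.random.rand(4, 6, 2).astype(np.float32)
    writeFlow(path, flow)
    restored = readFlow(path)
    assert restored.shape == (4, 6, 2) and np.allclose(restored, flow)
    return restored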
def readFlowKITTI(filename):
flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR)
flow = flow[:,:,::-1].astype(np.float32)
flow, valid = flow[:, :, :2], flow[:, :, 2]
flow = (flow - 2**15) / 64.0
return flow, valid
def readDispKITTI(filename):
disp = cv2.imread(filename, cv2.IMREAD_ANYDEPTH) / 256.0
valid = disp > 0.0
flow = np.stack([-disp, np.zeros_like(disp)], -1)
return flow, valid
def writeFlowKITTI(filename, uv):
uv = 64.0 * uv + 2**15
valid = np.ones([uv.shape[0], uv.shape[1], 1])
uv = np.concatenate([uv, valid], axis=-1).astype(np.uint16)
cv2.imwrite(filename, uv[..., ::-1])
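# Worked example (not part of the original file): the KITTI helpers above store each
# flow component f in a 16-bit PNG channel as 64*f + 2**15 (roughly [-512, +512) px at
# 1/64 px resolution, with the validity mask in the third channel) and decode it with
# (raw - 2**15) / 64.
def _example_kitti_flow_encoding(f=3.25):
    raw = 64.0 * f + 2**15          # value stored in the PNG: 32976.0
    decoded = (raw - 2**15) / 64.0  # recovers 3.25
    return raw, decoded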
def read_gen(file_name, pil=False):
ext = splitext(file_name)[-1]
if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
return Image.open(file_name)
elif ext == '.bin' or ext == '.raw':
return np.load(file_name)
elif ext == '.flo':
return readFlow(file_name).astype(np.float32)
elif ext == '.pfm':
flow = readPFM(file_name).astype(np.float32)
if len(flow.shape) == 2:
return flow
else:
return flow[:, :, :-1]
return [] | 4,024 | 28.379562 | 109 | py |
RAFT | RAFT-master/alt_cuda_corr/setup.py | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='correlation',
ext_modules=[
CUDAExtension('alt_cuda_corr',
sources=['correlation.cpp', 'correlation_kernel.cu'],
extra_compile_args={'cxx': [], 'nvcc': ['-O3']}),
],
cmdclass={
'build_ext': BuildExtension
})
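# Illustrative note (not part of the original setup script): the extension is typically
# built and imported as follows; the callables exposed by the resulting `alt_cuda_corr`
# module are defined in correlation.cpp / correlation_kernel.cu and are not shown here.
#   python setup.py install      (or: pip install .)
#   >>> import alt_cuda_corr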
| 381 | 22.875 | 67 | py |
onnxruntime | onnxruntime-main/setup.py | # ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# ------------------------------------------------------------------------
# pylint: disable=C0103
import datetime
import logging
import platform
import subprocess
import sys
from glob import glob, iglob
from os import environ, getcwd, path, popen, remove
from pathlib import Path
from shutil import copyfile
from packaging.tags import sys_tags
from setuptools import Extension, setup
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.command.install import install as InstallCommandBase
nightly_build = False
package_name = "onnxruntime"
wheel_name_suffix = None
logger = logging.getLogger()
def parse_arg_remove_boolean(argv, arg_name):
arg_value = False
if arg_name in sys.argv:
arg_value = True
argv.remove(arg_name)
return arg_value
def parse_arg_remove_string(argv, arg_name_equal):
arg_value = None
for arg in sys.argv[1:]:
if arg.startswith(arg_name_equal):
arg_value = arg[len(arg_name_equal) :]
sys.argv.remove(arg)
break
return arg_value
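# Illustrative note (not part of the original setup.py): both helpers above are meant to
# be called with sys.argv itself; they test membership against sys.argv and remove the
# matched argument in place so that only standard setuptools arguments remain. For example,
#   python setup.py bdist_wheel --nightly_build --wheel_name_suffix=gpu
# leaves just "bdist_wheel" for setuptools once both custom arguments are consumed.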
# Any combination of the following arguments can be applied
if parse_arg_remove_boolean(sys.argv, "--nightly_build"):
package_name = "ort-nightly"
nightly_build = True
wheel_name_suffix = parse_arg_remove_string(sys.argv, "--wheel_name_suffix=")
cuda_version = None
rocm_version = None
is_rocm = False
is_openvino = False
# The following arguments are mutually exclusive
if wheel_name_suffix == "gpu":
# TODO: how to support multiple CUDA versions?
cuda_version = parse_arg_remove_string(sys.argv, "--cuda_version=")
elif parse_arg_remove_boolean(sys.argv, "--use_rocm"):
is_rocm = True
package_name = "onnxruntime-rocm" if not nightly_build else "ort-rocm-nightly"
rocm_version = parse_arg_remove_string(sys.argv, "--rocm_version=")
elif parse_arg_remove_boolean(sys.argv, "--use_openvino"):
is_openvino = True
package_name = "onnxruntime-openvino"
elif parse_arg_remove_boolean(sys.argv, "--use_dnnl"):
package_name = "onnxruntime-dnnl"
elif parse_arg_remove_boolean(sys.argv, "--use_tvm"):
package_name = "onnxruntime-tvm"
elif parse_arg_remove_boolean(sys.argv, "--use_vitisai"):
package_name = "onnxruntime-vitisai"
elif parse_arg_remove_boolean(sys.argv, "--use_acl"):
package_name = "onnxruntime-acl"
elif parse_arg_remove_boolean(sys.argv, "--use_armnn"):
package_name = "onnxruntime-armnn"
elif parse_arg_remove_boolean(sys.argv, "--use_cann"):
package_name = "onnxruntime-cann"
elif parse_arg_remove_boolean(sys.argv, "--use_azure"):
# keep the same name since AzureEP will release with CpuEP by default.
package_name = "onnxruntime"
elif parse_arg_remove_boolean(sys.argv, "--use_qnn"):
package_name = "onnxruntime-qnn"
# PEP 513 defined manylinux1_x86_64 and manylinux1_i686
# PEP 571 defined manylinux2010_x86_64 and manylinux2010_i686
# PEP 599 defines the following platform tags:
# manylinux2014_x86_64
# manylinux2014_i686
# manylinux2014_aarch64
# manylinux2014_armv7l
# manylinux2014_ppc64
# manylinux2014_ppc64le
# manylinux2014_s390x
manylinux_tags = [
"manylinux1_x86_64",
"manylinux1_i686",
"manylinux2010_x86_64",
"manylinux2010_i686",
"manylinux2014_x86_64",
"manylinux2014_i686",
"manylinux2014_aarch64",
"manylinux2014_armv7l",
"manylinux2014_ppc64",
"manylinux2014_ppc64le",
"manylinux2014_s390x",
"manylinux_2_27_x86_64",
"manylinux_2_27_aarch64",
]
is_manylinux = environ.get("AUDITWHEEL_PLAT", None) in manylinux_tags
class build_ext(_build_ext): # noqa: N801
def build_extension(self, ext):
dest_file = self.get_ext_fullpath(ext.name)
logger.info("copying %s -> %s", ext.sources[0], dest_file)
copyfile(ext.sources[0], dest_file)
try:
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
class bdist_wheel(_bdist_wheel): # noqa: N801
"""Helper functions to create wheel package"""
if is_openvino and is_manylinux:
def get_tag(self):
_, _, plat = _bdist_wheel.get_tag(self)
if platform.system() == "Linux":
# Get the right platform tag by querying the linker version
glibc_major, glibc_minor = popen("ldd --version | head -1").read().split()[-1].split(".")
"""# See https://github.com/mayeut/pep600_compliance/blob/master/
pep600_compliance/tools/manylinux-policy.json"""
if glibc_major == "2" and glibc_minor == "17":
plat = "manylinux_2_17_x86_64.manylinux2014_x86_64"
else: # For manylinux2014 and above, no alias is required
plat = f"manylinux_{glibc_major}_{glibc_minor}_x86_64"
tags = next(sys_tags())
return (tags.interpreter, tags.abi, plat)
def finalize_options(self):
_bdist_wheel.finalize_options(self)
if not is_manylinux:
self.root_is_pure = False
def _rewrite_ld_preload(self, to_preload):
with open("onnxruntime/capi/_ld_preload.py", "a") as f:
if len(to_preload) > 0:
f.write("from ctypes import CDLL, RTLD_GLOBAL\n")
for library in to_preload:
f.write('_{} = CDLL("{}", mode=RTLD_GLOBAL)\n'.format(library.split(".")[0], library))
def _rewrite_ld_preload_cuda(self, to_preload):
with open("onnxruntime/capi/_ld_preload.py", "a") as f:
if len(to_preload) > 0:
f.write("from ctypes import CDLL, RTLD_GLOBAL\n")
f.write("try:\n")
for library in to_preload:
f.write(' _{} = CDLL("{}", mode=RTLD_GLOBAL)\n'.format(library.split(".")[0], library))
f.write("except OSError:\n")
f.write(" import os\n")
f.write(' os.environ["ORT_CUDA_UNAVAILABLE"] = "1"\n')
def _rewrite_ld_preload_tensorrt(self, to_preload):
with open("onnxruntime/capi/_ld_preload.py", "a", encoding="ascii") as f:
if len(to_preload) > 0:
f.write("from ctypes import CDLL, RTLD_GLOBAL\n")
f.write("try:\n")
for library in to_preload:
f.write(' _{} = CDLL("{}", mode=RTLD_GLOBAL)\n'.format(library.split(".")[0], library))
f.write("except OSError:\n")
f.write(" import os\n")
f.write(' os.environ["ORT_TENSORRT_UNAVAILABLE"] = "1"\n')
def run(self):
if is_manylinux:
source = "onnxruntime/capi/onnxruntime_pybind11_state.so"
dest = "onnxruntime/capi/onnxruntime_pybind11_state_manylinux1.so"
logger.info("copying %s -> %s", source, dest)
copyfile(source, dest)
result = subprocess.run(
["patchelf", "--print-needed", dest], check=True, stdout=subprocess.PIPE, text=True
)
dependencies = [
"librccl.so",
"libamdhip64.so",
"librocblas.so",
"libMIOpen.so",
"libhsa-runtime64.so",
"libhsakmt.so",
]
to_preload = []
to_preload_cuda = []
to_preload_tensorrt = []
to_preload_cann = []
cuda_dependencies = []
args = ["patchelf", "--debug"]
for line in result.stdout.split("\n"):
for dependency in dependencies:
if dependency in line:
to_preload.append(line)
args.extend(["--remove-needed", line])
args.append(dest)
if len(args) > 3:
subprocess.run(args, check=True, stdout=subprocess.PIPE)
dest = "onnxruntime/capi/libonnxruntime_providers_" + ("rocm.so" if is_rocm else "cuda.so")
if path.isfile(dest):
result = subprocess.run(
["patchelf", "--print-needed", dest],
check=True,
stdout=subprocess.PIPE,
text=True,
)
cuda_dependencies = [
"libcublas.so",
"libcublasLt.so",
"libcudnn.so",
"libcudart.so",
"libcurand.so",
"libcufft.so",
"libnvToolsExt.so",
"libcupti.so",
]
rocm_dependencies = [
"librccl.so",
"libamdhip64.so",
"librocblas.so",
"libMIOpen.so",
"libhsa-runtime64.so",
"libhsakmt.so",
]
args = ["patchelf", "--debug"]
for line in result.stdout.split("\n"):
for dependency in cuda_dependencies + rocm_dependencies:
if dependency in line:
if dependency not in to_preload:
to_preload_cuda.append(line)
args.extend(["--remove-needed", line])
args.append(dest)
if len(args) > 3:
subprocess.run(args, check=True, stdout=subprocess.PIPE)
dest = "onnxruntime/capi/libonnxruntime_providers_" + ("migraphx.so" if is_rocm else "tensorrt.so")
if path.isfile(dest):
result = subprocess.run(
["patchelf", "--print-needed", dest],
check=True,
stdout=subprocess.PIPE,
text=True,
)
tensorrt_dependencies = ["libnvinfer.so", "libnvinfer_plugin.so", "libnvonnxparser.so"]
args = ["patchelf", "--debug"]
for line in result.stdout.split("\n"):
for dependency in cuda_dependencies + tensorrt_dependencies:
if dependency in line:
if dependency not in (to_preload + to_preload_cuda):
to_preload_tensorrt.append(line)
args.extend(["--remove-needed", line])
args.append(dest)
if len(args) > 3:
subprocess.run(args, check=True, stdout=subprocess.PIPE)
dest = "onnxruntime/capi/libonnxruntime_providers_cann.so"
if path.isfile(dest):
result = subprocess.run(
["patchelf", "--print-needed", dest],
check=True,
stdout=subprocess.PIPE,
text=True,
)
cann_dependencies = ["libascendcl.so", "libacl_op_compiler.so", "libfmk_onnx_parser.so"]
args = ["patchelf", "--debug"]
for line in result.stdout.split("\n"):
for dependency in cann_dependencies:
if dependency in line:
if dependency not in to_preload:
to_preload_cann.append(line)
args.extend(["--remove-needed", line])
args.append(dest)
if len(args) > 3:
subprocess.run(args, check=True, stdout=subprocess.PIPE)
dest = "onnxruntime/capi/libonnxruntime_providers_openvino.so"
if path.isfile(dest):
subprocess.run(
["patchelf", "--set-rpath", "$ORIGIN", dest, "--force-rpath"],
check=True,
stdout=subprocess.PIPE,
text=True,
)
self._rewrite_ld_preload(to_preload)
self._rewrite_ld_preload_cuda(to_preload_cuda)
self._rewrite_ld_preload_tensorrt(to_preload_tensorrt)
self._rewrite_ld_preload(to_preload_cann)
else:
pass
_bdist_wheel.run(self)
if is_manylinux and not disable_auditwheel_repair and not is_openvino:
assert self.dist_dir is not None
file = glob(path.join(self.dist_dir, "*linux*.whl"))[0]
logger.info("repairing %s for manylinux1", file)
try:
subprocess.run(
["auditwheel", "repair", "-w", self.dist_dir, file], check=True, stdout=subprocess.PIPE
)
finally:
logger.info("removing %s", file)
remove(file)
except ImportError as error:
print("Error importing dependencies:")
print(error)
bdist_wheel = None
class InstallCommand(InstallCommandBase):
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_lib = self.install_platlib
return ret
providers_cuda_or_rocm = "libonnxruntime_providers_" + ("rocm.so" if is_rocm else "cuda.so")
providers_tensorrt_or_migraphx = "libonnxruntime_providers_" + ("migraphx.so" if is_rocm else "tensorrt.so")
providers_openvino = "libonnxruntime_providers_openvino.so"
providers_cann = "libonnxruntime_providers_cann.so"
# Additional binaries
dl_libs = []
libs = []
if platform.system() == "Linux":
libs = [
"onnxruntime_pybind11_state.so",
"libdnnl.so.2",
"libmklml_intel.so",
"libmklml_gnu.so",
"libiomp5.so",
"mimalloc.so",
]
dl_libs = ["libonnxruntime_providers_shared.so"]
dl_libs.append(providers_cuda_or_rocm)
dl_libs.append(providers_tensorrt_or_migraphx)
dl_libs.append(providers_cann)
# DNNL, TensorRT & OpenVINO EPs are built as shared libs
libs.extend(["libonnxruntime_providers_shared.so"])
libs.extend(["libonnxruntime_providers_dnnl.so"])
libs.extend(["libonnxruntime_providers_openvino.so"])
libs.append(providers_cuda_or_rocm)
libs.append(providers_tensorrt_or_migraphx)
libs.append(providers_cann)
if nightly_build:
libs.extend(["libonnxruntime_pywrapper.so"])
elif platform.system() == "Darwin":
libs = ["onnxruntime_pybind11_state.so", "libdnnl.2.dylib", "mimalloc.so"] # TODO add libmklml and libiomp5 later.
# DNNL & TensorRT EPs are built as shared libs
libs.extend(["libonnxruntime_providers_shared.dylib"])
libs.extend(["libonnxruntime_providers_dnnl.dylib"])
libs.extend(["libonnxruntime_providers_tensorrt.dylib"])
libs.extend(["libonnxruntime_providers_cuda.dylib"])
if nightly_build:
libs.extend(["libonnxruntime_pywrapper.dylib"])
else:
libs = ["onnxruntime_pybind11_state.pyd", "dnnl.dll", "mklml.dll", "libiomp5md.dll"]
# DNNL, TensorRT & OpenVINO EPs are built as shared libs
libs.extend(["onnxruntime_providers_shared.dll"])
libs.extend(["onnxruntime_providers_dnnl.dll"])
libs.extend(["onnxruntime_providers_tensorrt.dll"])
libs.extend(["onnxruntime_providers_openvino.dll"])
libs.extend(["onnxruntime_providers_cuda.dll"])
# DirectML Libs
libs.extend(["DirectML.dll"])
if nightly_build:
libs.extend(["onnxruntime_pywrapper.dll"])
if is_manylinux:
if is_openvino:
ov_libs = [
"libopenvino_intel_cpu_plugin.so",
"libopenvino_intel_gpu_plugin.so",
"libopenvino_auto_plugin.so",
"libopenvino_hetero_plugin.so",
"libtbb.so.2",
"libtbbmalloc.so.2",
"libopenvino.so",
"libopenvino_c.so",
"libopenvino_onnx_frontend.so",
]
for x in ov_libs:
y = "onnxruntime/capi/" + x
subprocess.run(
["patchelf", "--set-rpath", "$ORIGIN", y, "--force-rpath"],
check=True,
stdout=subprocess.PIPE,
text=True,
)
dl_libs.append(x)
dl_libs.append(providers_openvino)
dl_libs.append("plugins.xml")
dl_libs.append("usb-ma2x8x.mvcmd")
data = ["capi/libonnxruntime_pywrapper.so"] if nightly_build else []
data += [path.join("capi", x) for x in dl_libs if path.isfile(path.join("onnxruntime", "capi", x))]
ext_modules = [
Extension(
"onnxruntime.capi.onnxruntime_pybind11_state",
["onnxruntime/capi/onnxruntime_pybind11_state_manylinux1.so"],
),
]
else:
data = [path.join("capi", x) for x in libs if path.isfile(path.join("onnxruntime", "capi", x))]
ext_modules = []
# Additional examples
examples_names = ["mul_1.onnx", "logreg_iris.onnx", "sigmoid.onnx"]
examples = [path.join("datasets", x) for x in examples_names]
# Extra files such as EULA and ThirdPartyNotices
extra = ["LICENSE", "ThirdPartyNotices.txt", "Privacy.md"]
# Description
readme_file = "docs/python/ReadMeOV.rst" if is_openvino else "docs/python/README.rst"
README = path.join(getcwd(), readme_file)
if not path.exists(README):
this = path.dirname(__file__)
README = path.join(this, readme_file)
if not path.exists(README):
raise FileNotFoundError("Unable to find 'README.rst'")
with open(README, encoding="utf-8") as fdesc:
long_description = fdesc.read()
# Include files in onnxruntime/external if --enable_external_custom_op_schemas build.sh command
# line option is specified.
# If the option is not specified, the following condition fails as the onnxruntime/external folder is not created in the
# build flow under the build binary directory.
if path.isdir(path.join("onnxruntime", "external")):
# Gather all files under onnxruntime/external directory.
extra.extend(
list(
str(Path(*Path(x).parts[1:]))
for x in list(iglob(path.join(path.join("onnxruntime", "external"), "**/*.*"), recursive=True))
)
)
packages = [
"onnxruntime",
"onnxruntime.backend",
"onnxruntime.capi",
"onnxruntime.capi.training",
"onnxruntime.datasets",
"onnxruntime.tools",
"onnxruntime.tools.mobile_helpers",
"onnxruntime.tools.ort_format_model",
"onnxruntime.tools.ort_format_model.ort_flatbuffers_py",
"onnxruntime.tools.ort_format_model.ort_flatbuffers_py.fbs",
"onnxruntime.tools.qdq_helpers",
"onnxruntime.quantization",
"onnxruntime.quantization.operators",
"onnxruntime.quantization.CalTableFlatBuffers",
"onnxruntime.transformers",
"onnxruntime.transformers.models.bart",
"onnxruntime.transformers.models.bert",
"onnxruntime.transformers.models.gpt2",
"onnxruntime.transformers.models.longformer",
"onnxruntime.transformers.models.t5",
"onnxruntime.transformers.models.stable_diffusion",
"onnxruntime.transformers.models.whisper",
]
package_data = {"onnxruntime.tools.mobile_helpers": ["*.md", "*.config"]}
data_files = []
requirements_file = "requirements.txt"
local_version = None
enable_training = parse_arg_remove_boolean(sys.argv, "--enable_training")
enable_training_apis = parse_arg_remove_boolean(sys.argv, "--enable_training_apis")
enable_rocm_profiling = parse_arg_remove_boolean(sys.argv, "--enable_rocm_profiling")
disable_auditwheel_repair = parse_arg_remove_boolean(sys.argv, "--disable_auditwheel_repair")
default_training_package_device = parse_arg_remove_boolean(sys.argv, "--default_training_package_device")
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
if not enable_training:
classifiers.extend(["Operating System :: Microsoft :: Windows", "Operating System :: MacOS"])
if enable_training or enable_training_apis:
packages.append("onnxruntime.training")
if enable_training:
packages.extend(
[
"onnxruntime.training.amp",
"onnxruntime.training.experimental",
"onnxruntime.training.experimental.gradient_graph",
"onnxruntime.training.optim",
"onnxruntime.training.torchdynamo",
"onnxruntime.training.ortmodule",
"onnxruntime.training.ortmodule.experimental",
"onnxruntime.training.ortmodule.experimental.json_config",
"onnxruntime.training.ortmodule.experimental.hierarchical_ortmodule",
"onnxruntime.training.ortmodule.torch_cpp_extensions",
"onnxruntime.training.ortmodule.torch_cpp_extensions.cpu.aten_op_executor",
"onnxruntime.training.ortmodule.torch_cpp_extensions.cpu.torch_interop_utils",
"onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.torch_gpu_allocator",
"onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.fused_ops",
"onnxruntime.training.ort_triton",
"onnxruntime.training.ort_triton.kernel",
"onnxruntime.training.utils.data",
"onnxruntime.training.utils.hooks",
]
)
package_data["onnxruntime.training.ortmodule.torch_cpp_extensions.cpu.aten_op_executor"] = ["*.cc"]
package_data["onnxruntime.training.ortmodule.torch_cpp_extensions.cpu.torch_interop_utils"] = ["*.cc"]
package_data["onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.torch_gpu_allocator"] = ["*.cc"]
package_data["onnxruntime.training.ortmodule.torch_cpp_extensions.cuda.fused_ops"] = [
"*.cpp",
"*.cu",
"*.cuh",
"*.h",
]
packages.extend(
[
"onnxruntime.training.api",
"onnxruntime.training.onnxblock",
"onnxruntime.training.onnxblock.loss",
"onnxruntime.training.onnxblock.optim",
]
)
requirements_file = "requirements-training.txt"
# with training, we want to follow this naming convention:
# stable:
# onnxruntime-training-1.7.0+cu111-cp36-cp36m-linux_x86_64.whl
# nightly:
# onnxruntime-training-1.7.0.dev20210408+cu111-cp36-cp36m-linux_x86_64.whl
# this is needed immediately by pytorch/ort so that the user is able to
# install an onnxruntime training package with matching torch cuda version.
if not is_openvino:
# To support the package consisting of both openvino and training modules part of it
package_name = "onnxruntime-training"
disable_local_version = environ.get("ORT_DISABLE_PYTHON_PACKAGE_LOCAL_VERSION", "0")
disable_local_version = (
disable_local_version == "1"
or disable_local_version.lower() == "true"
or disable_local_version.lower() == "yes"
)
# local version should be disabled for internal feeds.
if not disable_local_version:
        # we want to put default training packages on pypi. pypi does not accept packages with a local version.
if not default_training_package_device or nightly_build:
if cuda_version:
# removing '.' to make Cuda version number in the same form as Pytorch.
local_version = "+cu" + cuda_version.replace(".", "")
elif rocm_version:
# removing '.' to make Rocm version number in the same form as Pytorch.
local_version = "+rocm" + rocm_version.replace(".", "")
else:
# cpu version for documentation
local_version = "+cpu"
if package_name == "onnxruntime-tvm":
packages += ["onnxruntime.providers.tvm"]
package_data["onnxruntime"] = data + examples + extra
version_number = ""
with open("VERSION_NUMBER") as f:
version_number = f.readline().strip()
if nightly_build:
# https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables
build_suffix = environ.get("BUILD_BUILDNUMBER")
if build_suffix is None:
# The following line is only for local testing
build_suffix = str(datetime.datetime.now().date().strftime("%Y%m%d"))
else:
build_suffix = build_suffix.replace(".", "")
if len(build_suffix) > 8 and len(build_suffix) < 12:
# we want to format the build_suffix to avoid (the 12th run on 20210630 vs the first run on 20210701):
# 2021063012 > 202107011
        # in the comparison above, 2021063012 would be treated as the latest, which is incorrect.
# we want to convert the format to:
# 20210630012 < 20210701001
# where the first 8 digits are date. the last 3 digits are run count.
# as long as there are less than 1000 runs per day, we will not have the problem.
# to test this code locally, run:
# NIGHTLY_BUILD=1 BUILD_BUILDNUMBER=202107011 python tools/ci_build/build.py --config RelWithDebInfo \
# --enable_training --use_cuda --cuda_home /usr/local/cuda --cudnn_home /usr/lib/x86_64-linux-gnu/ \
# --nccl_home /usr/lib/x86_64-linux-gnu/ --build_dir build/Linux --build --build_wheel --skip_tests \
# --cuda_version 11.1
def check_date_format(date_str):
try:
datetime.datetime.strptime(date_str, "%Y%m%d")
return True
except Exception:
return False
def reformat_run_count(count_str):
try:
count = int(count_str)
if count >= 0 and count < 1000:
return f"{count:03}"
elif count >= 1000:
raise RuntimeError(f"Too many builds for the same day: {count}")
return ""
except Exception:
return ""
build_suffix_is_date_format = check_date_format(build_suffix[:8])
build_suffix_run_count = reformat_run_count(build_suffix[8:])
if build_suffix_is_date_format and build_suffix_run_count:
build_suffix = build_suffix[:8] + build_suffix_run_count
elif len(build_suffix) >= 12:
raise RuntimeError(f'Incorrect build suffix: "{build_suffix}"')
if enable_training:
from packaging import version
from packaging.version import Version
# with training package, we need to bump up version minor number so that
# nightly releases take precedence over the latest release when --pre is used during pip install.
# eventually this shall be the behavior of all onnxruntime releases.
# alternatively we may bump up version number right after every release.
ort_version = version.parse(version_number)
if isinstance(ort_version, Version):
# TODO: this is the last time we have to do this!!!
# We shall bump up release number right after release cut.
if ort_version.major == 1 and ort_version.minor == 8 and ort_version.micro == 0:
version_number = "{major}.{minor}.{macro}".format(
major=ort_version.major, minor=ort_version.minor + 1, macro=ort_version.micro
)
version_number = version_number + ".dev" + build_suffix
if local_version:
version_number = version_number + local_version
if is_rocm and enable_rocm_profiling:
version_number = version_number + ".profiling"
if wheel_name_suffix:
if not (enable_training and wheel_name_suffix == "gpu"):
# for training packages, local version is used to indicate device types
package_name = f"{package_name}-{wheel_name_suffix}"
cmd_classes = {}
if bdist_wheel is not None:
cmd_classes["bdist_wheel"] = bdist_wheel
cmd_classes["install"] = InstallCommand
cmd_classes["build_ext"] = build_ext
requirements_path = path.join(getcwd(), requirements_file)
if not path.exists(requirements_path):
this = path.dirname(__file__)
requirements_path = path.join(this, requirements_file)
if not path.exists(requirements_path):
raise FileNotFoundError("Unable to find " + requirements_file)
with open(requirements_path) as f:
install_requires = f.read().splitlines()
if enable_training:
def save_build_and_package_info(package_name, version_number, cuda_version, rocm_version):
sys.path.append(path.join(path.dirname(__file__), "onnxruntime", "python"))
from onnxruntime_collect_build_info import find_cudart_versions
version_path = path.join("onnxruntime", "capi", "build_and_package_info.py")
with open(version_path, "w") as f:
f.write(f"package_name = '{package_name}'\n")
f.write(f"__version__ = '{version_number}'\n")
if cuda_version:
f.write(f"cuda_version = '{cuda_version}'\n")
# cudart_versions are integers
cudart_versions = find_cudart_versions(build_env=True)
if cudart_versions and len(cudart_versions) == 1:
f.write(f"cudart_version = {cudart_versions[0]}\n")
else:
print(
"Error getting cudart version. ",
"did not find any cudart library"
if not cudart_versions or len(cudart_versions) == 0
else "found multiple cudart libraries",
)
elif rocm_version:
f.write(f"rocm_version = '{rocm_version}'\n")
save_build_and_package_info(package_name, version_number, cuda_version, rocm_version)
# Setup
setup(
name=package_name,
version=version_number,
description="ONNX Runtime is a runtime accelerator for Machine Learning models",
long_description=long_description,
author="Microsoft Corporation",
author_email="[email protected]",
cmdclass=cmd_classes,
license="MIT License",
packages=packages,
ext_modules=ext_modules,
package_data=package_data,
url="https://onnxruntime.ai",
download_url="https://github.com/microsoft/onnxruntime/tags",
data_files=data_files,
install_requires=install_requires,
keywords="onnx machine learning",
entry_points={
"console_scripts": [
"onnxruntime_test = onnxruntime.tools.onnxruntime_test:main",
]
},
classifiers=classifiers,
)
| 31,487 | 41.265772 | 119 | py |
onnxruntime | onnxruntime-main/objectivec/test/testdata/single_add_gen.py | import onnx
from onnx import TensorProto, helper
graph = helper.make_graph(
[ # nodes
helper.make_node("Add", ["A", "B"], ["C"], "Add"),
],
"SingleAdd", # name
[ # inputs
helper.make_tensor_value_info("A", TensorProto.FLOAT, [1]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [1]),
],
[ # outputs
helper.make_tensor_value_info("C", TensorProto.FLOAT, [1]),
],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 12)])
onnx.save(model, r"single_add.onnx")
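# Illustrative sketch (not part of the original generator script): the generated model
# can be exercised with the onnxruntime Python API.
def _example_run_single_add():
    import numpy as np
    import onnxruntime as ort
    sess = ort.InferenceSession("single_add.onnx", providers=["CPUExecutionProvider"])
    a = np.array([1.0], dtype=np.float32)
    b = np.array([2.0], dtype=np.float32)
    (c,) = sess.run(["C"], {"A": a, "B": b})
    return c  # array([3.], dtype=float32)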
| 552 | 26.65 | 77 | py |
onnxruntime | onnxruntime-main/tools/ci_build/patch_manylinux.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import os
import shutil
import sys
from pathlib import Path
from logger import get_logger
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
sys.path.append(os.path.join(REPO_DIR, "tools", "python"))
from util import run # noqa: E402
log = get_logger("patch_manylinux")
def parse_args():
parser = argparse.ArgumentParser(
description="Build a docker image and push it to a remote Azure Container Registry."
"The content in the remote registry can be used as a cache when we need to build the thing again."
"The user must be logged in to the container registry."
)
parser.add_argument("--dockerfile", default="Dockerfile", help="Path to the Dockerfile.")
parser.add_argument("--context", default=".", help="Path to the build context.")
parser.add_argument("--manylinux-src", default="manylinux", help="Path to manylinux src folder")
return parser.parse_args()
def main():
args = parse_args()
log.debug(f"Dockerfile: {args.dockerfile}, context: {args.context}")
if "manylinux" in args.dockerfile:
manylinux_build_scripts_folder = Path(args.manylinux_src) / "docker" / "build_scripts"
dest = Path(args.context) / "build_scripts"
if dest.exists():
log.info(f"Deleting: {str(dest)}")
shutil.rmtree(str(dest))
shutil.copytree(str(manylinux_build_scripts_folder), str(dest))
src_entrypoint_file = str(Path(args.manylinux_src) / "docker" / "manylinux-entrypoint")
dst_entrypoint_file = str(Path(args.context) / "manylinux-entrypoint")
shutil.copyfile(src_entrypoint_file, dst_entrypoint_file)
shutil.copymode(src_entrypoint_file, dst_entrypoint_file)
run(
"patch",
"-p1",
"-i",
str((Path(SCRIPT_DIR) / "github" / "linux" / "docker" / "manylinux.patch").resolve()),
cwd=str(dest),
)
return 0
if __name__ == "__main__":
sys.exit(main())
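# Illustrative note (not part of the original script); a typical invocation, with all
# paths as placeholders:
#   python tools/ci_build/patch_manylinux.py \
#       --dockerfile <path/to/manylinux Dockerfile> \
#       --context <docker build context dir> \
#       --manylinux-src <checkout of the manylinux repo>
# The manylinux build_scripts folder and manylinux-entrypoint are copied into the build
# context and then patched with github/linux/docker/manylinux.patch.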
| 2,181 | 32.569231 | 106 | py |
onnxruntime | onnxruntime-main/tools/ci_build/op_registration_utils.py | # !/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Utilities to help process files containing kernel registrations.
"""
import os
import pathlib
import sys
import typing
from logger import get_logger
log = get_logger("op_registration_utils")
def map_ort_constant_to_domain(ort_constant_name: str, allow_unknown_constant: bool = True):
"""
Map the name of the internal ONNX Runtime constant used in operator kernel registrations to the domain name
used in ONNX models and configuration files.
:param ort_constant_name: ONNX Runtime constant name for the domain from a kernel registration entry.
:param allow_unknown_constant: Whether an unknown constant is allowed or treated as an error.
:return: String with public domain name.
"""
# constants are defined in <ORT root>/include/onnxruntime/core/graph/constants.h
constant_to_domain_map = {
"kOnnxDomain": "ai.onnx",
"kMLDomain": "ai.onnx.ml",
"kMSDomain": "com.microsoft",
"kPytorchAtenDomain": "org.pytorch.aten",
"kMSExperimentalDomain": "com.microsoft.experimental",
"kMSNchwcDomain": "com.microsoft.nchwc",
"kMSInternalNHWCDomain": "com.ms.internal.nhwc",
"kMSDmlDomain": "com.microsoft.dml",
"kNGraphDomain": "com.intel.ai",
"kVitisAIDomain": "com.xilinx",
}
if ort_constant_name in constant_to_domain_map:
return constant_to_domain_map[ort_constant_name]
unknown_constant_message = f"Unknown domain for ONNX Runtime constant of {ort_constant_name}."
if not allow_unknown_constant:
raise ValueError(unknown_constant_message)
log.warning(unknown_constant_message)
return None
def get_kernel_registration_files(ort_root=None, include_cuda=False):
"""
Return paths to files containing kernel registrations for CPU and CUDA providers.
:param ort_root: ORT repository root directory. Inferred from the location of this script if not provided.
:param include_cuda: Include the CUDA registrations in the list of files.
:return: list[str] containing the kernel registration filenames.
"""
if not ort_root:
ort_root = os.path.dirname(os.path.abspath(__file__)) + "/../.."
provider_path = ort_root + "/onnxruntime/core/providers/{ep}/{ep}_execution_provider.cc"
contrib_provider_path = ort_root + "/onnxruntime/contrib_ops/{ep}/{ep}_contrib_kernels.cc"
training_provider_path = ort_root + "/orttraining/orttraining/training_ops/{ep}/{ep}_training_kernels.cc"
provider_paths = [
provider_path.format(ep="cpu"),
contrib_provider_path.format(ep="cpu"),
training_provider_path.format(ep="cpu"),
]
if include_cuda:
provider_paths.append(provider_path.format(ep="cuda"))
provider_paths.append(contrib_provider_path.format(ep="cuda"))
provider_paths.append(training_provider_path.format(ep="cuda"))
provider_paths = [os.path.abspath(p) for p in provider_paths]
return provider_paths
class RegistrationProcessor:
"""
Class to process lines that are extracted from a kernel registration file.
For each kernel registration, process_registration is called.
For all other lines, process_other_line is called.
"""
def process_registration(
self,
lines: typing.List[str],
domain: str,
operator: str,
start_version: int,
end_version: typing.Optional[int] = None,
type: typing.Optional[str] = None,
):
"""
Process lines that contain a kernel registration.
:param lines: Array containing the original lines containing the kernel registration.
:param domain: Domain for the operator
:param operator: Operator type
:param start_version: Start version
:param end_version: End version or None if unversioned registration
:param type: Type or types used in registration, if this is a typed registration
"""
pass
def process_other_line(self, line):
"""
Process a line that does not contain a kernel registration
:param line: Original line
"""
pass
def ok(self):
"""
Get overall status for processing
:return: True if successful. False if not. Error will be logged as the registrations are processed.
"""
return False # return False as the derived class must override to report the real status
def _process_lines(lines: typing.List[str], offset: int, registration_processor: RegistrationProcessor):
"""
Process one or more lines that contain a kernel registration.
    Merge lines that are split over multiple physical lines, and call registration_processor.process_registration with the original lines
and the registration information.
:return: Offset for first line that was not consumed.
"""
onnx_op = "ONNX_OPERATOR_KERNEL_CLASS_NAME"
onnx_op_len = len(onnx_op)
onnx_typed_op = "ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME"
onnx_typed_op_len = len(onnx_typed_op)
onnx_versioned_op = "ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME"
onnx_versioned_op_len = len(onnx_versioned_op)
onnx_versioned_typed_op = "ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME"
onnx_versioned_typed_op_len = len(onnx_versioned_typed_op)
onnx_two_typed_op = "ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME"
onnx_two_typed_op_len = len(onnx_two_typed_op)
onnx_versioned_two_typed_op = "ONNX_OPERATOR_VERSIONED_TWO_TYPED_KERNEL_CLASS_NAME"
onnx_versioned_two_typed_op_len = len(onnx_versioned_two_typed_op)
end_marks = tuple([");", ")>", ")>,", ")>,};", ")>};"])
end_mark = ""
lines_to_process = []
    # merge the registration into a single line if it is split over multiple lines.
    # original lines will be in lines_to_process; the merged and stripped line will be in code_line
while True:
lines_to_process.append(lines[offset])
stripped = lines[offset].strip()
line_end = False
for mark in end_marks:
if stripped.endswith(mark):
end_mark = mark
line_end = True
break
if line_end:
break
offset += 1
        if offset >= len(lines):
log.error("Past end of input lines looking for line terminator.")
sys.exit(-1)
code_line = "".join([line.strip() for line in lines_to_process])
if onnx_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 7, Cos)>,
trim_at = code_line.index(onnx_op) + onnx_op_len + 1
*_, domain, start_version, op_type = (arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(","))
registration_processor.process_registration(lines_to_process, domain, op_type, int(start_version), None, None)
elif onnx_typed_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 7, double, Sin)>,
trim_at = code_line.index(onnx_typed_op) + onnx_typed_op_len + 1
*_, domain, start_version, type, op_type = (
arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(",")
)
registration_processor.process_registration(lines_to_process, domain, op_type, int(start_version), None, type)
elif onnx_versioned_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 1, 10, Hardmax)>,
trim_at = code_line.index(onnx_versioned_op) + onnx_versioned_op_len + 1
*_, domain, start_version, end_version, op_type = (
arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(",")
)
registration_processor.process_registration(
lines_to_process, domain, op_type, int(start_version), int(end_version), None
)
elif onnx_versioned_typed_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 1, 10, float, LogSoftmax)>,
trim_at = code_line.index(onnx_versioned_typed_op) + onnx_versioned_typed_op_len + 1
*_, domain, start_version, end_version, type, op_type = (
arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(",")
)
registration_processor.process_registration(
lines_to_process, domain, op_type, int(start_version), int(end_version), type
)
elif onnx_two_typed_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 19, float, uint8, QuantizeLinear)>,
trim_at = code_line.index(onnx_two_typed_op) + onnx_two_typed_op_len + 1
*_, domain, start_version, type1, type2, op_type = (
arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(",")
)
registration_processor.process_registration(
lines_to_process, domain, op_type, int(start_version), None, type1 + ", " + type2
)
elif onnx_versioned_two_typed_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 19, float, uint8, QuantizeLinear)>,
trim_at = code_line.index(onnx_versioned_two_typed_op) + onnx_versioned_two_typed_op_len + 1
*_, domain, start_version, end_version, type1, type2, op_type = (
arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(",")
)
registration_processor.process_registration(
lines_to_process, domain, op_type, int(start_version), int(end_version), type1 + ", " + type2
)
else:
log.warning(f"Ignoring unhandled kernel registration variant: {code_line}")
for line in lines_to_process:
registration_processor.process_other_line(line)
return offset + 1
def process_kernel_registration_file(
filename: typing.Union[str, pathlib.Path], registration_processor: RegistrationProcessor
):
"""
Process a kernel registration file using registration_processor.
:param filename: Path to file containing kernel registrations.
:param registration_processor: Processor to be used.
:return True if processing was successful.
"""
if not os.path.isfile(filename):
log.error(f"File not found: {filename}")
return False
lines = []
with open(filename) as file_to_read:
lines = file_to_read.readlines()
offset = 0
while offset < len(lines):
line = lines[offset]
stripped = line.strip()
if stripped.startswith("BuildKernelCreateInfo<ONNX"):
offset = _process_lines(lines, offset, registration_processor)
else:
registration_processor.process_other_line(line)
offset += 1
return True
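# Illustrative sketch (not part of the original module): a minimal RegistrationProcessor
# that simply collects what _process_lines extracts. For a registration such as
#   BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(
#       kCpuExecutionProvider, kOnnxDomain, 1, 10, float, LogSoftmax)>,
# process_registration is called with domain="kOnnxDomain", operator="LogSoftmax",
# start_version=1, end_version=10, type="float".
class _ExampleCollectingProcessor(RegistrationProcessor):
    def __init__(self):
        self.registrations = []
    def process_registration(self, lines, domain, operator, start_version, end_version=None, type=None):
        self.registrations.append((domain, operator, start_version, end_version, type))
    def ok(self):
        return True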
| 11,081 | 39.892989 | 118 | py |
onnxruntime | onnxruntime-main/tools/ci_build/amd_hipify.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import os
import subprocess
def hipify(hipify_perl_path, src_file_path, dst_file_path):
dir_name = os.path.dirname(dst_file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name, exist_ok=True)
# Run hipify-perl first, capture output
s = subprocess.run([hipify_perl_path, "-roc", src_file_path], stdout=subprocess.PIPE, text=True, check=False).stdout
# Additional exact-match replacements.
    # Order matters for all of the following replacements, regardless of the logical sections they appear in.
s = s.replace("kCudaExecutionProvider", "kRocmExecutionProvider")
s = s.replace("CUDAStreamType", "HIPStreamType")
s = s.replace("kCudaStreamDefault", "kHipStreamDefault")
s = s.replace("kCudaStreamCopyIn", "kHipStreamCopyIn")
s = s.replace("kCudaStreamCopyOut", "kHipStreamCopyOut")
s = s.replace("kTotalCudaStreams", "kTotalHipStreams")
# We want rocblas interfaces, not hipblas. Also force some hipify replacements back to rocblas from hipblas.
s = s.replace("CublasHandle", "RocblasHandle")
s = s.replace("cublas_handle", "rocblas_handle")
s = s.replace("hipblasHandle_t", "rocblas_handle")
s = s.replace("hipblasDatatype_t", "rocblas_datatype")
s = s.replace("HIPBLAS_STATUS_SUCCESS", "rocblas_status_success")
s = s.replace("hipblasStatus_t", "rocblas_status")
s = s.replace("hipblasCreate", "rocblas_create_handle")
s = s.replace("hipblasDestroy", "rocblas_destroy_handle")
s = s.replace("hipblasSetStream", "rocblas_set_stream")
s = s.replace("HIPBLAS_OP_T", "rocblas_operation_transpose")
s = s.replace("HIPBLAS_OP_N", "rocblas_operation_none")
s = s.replace("RegisterCudaContribKernels", "RegisterRocmContribKernels")
s = s.replace("cudaEvent", "hipEvent")
s = s.replace("CreateCudaAllocator", "CreateRocmAllocator")
s = s.replace("CudaErrString", "RocmErrString")
s = s.replace("CudaAsyncBuffer", "RocmAsyncBuffer")
s = s.replace("CudaKernel", "RocmKernel")
s = s.replace("CudaStream", "RocmStream")
s = s.replace("ToCudaType", "ToHipType")
s = s.replace("CudaT", "HipT")
s = s.replace("CUDA_LONG", "HIP_LONG")
s = s.replace("CUDA_RETURN_IF_ERROR", "HIP_RETURN_IF_ERROR")
s = s.replace("CUDA_KERNEL_ASSERT", "HIP_KERNEL_ASSERT")
s = s.replace("CUDA_CALL", "HIP_CALL")
s = s.replace("SliceCuda", "SliceRocm")
s = s.replace("thrust::cuda", "thrust::hip")
s = s.replace("CudaCall", "RocmCall")
s = s.replace("cuda", "rocm")
# s = s.replace('Cuda', 'Rocm')
s = s.replace("CUDA", "ROCM")
s = s.replace("GPU_WARP_SIZE = 32", "GPU_WARP_SIZE = 64")
s = s.replace("std::exp", "expf")
s = s.replace("std::log", "logf")
s = s.replace("WaitCudaNotificationOnDevice", "WaitRocmNotificationOnDevice")
s = s.replace("hipHostAlloc", "hipHostMalloc")
s = s.replace(
"#include <cub/device/device_radix_sort.cuh>",
"#include <hipcub/hipcub.hpp>\n#include <hipcub/backend/rocprim/device/device_radix_sort.hpp>",
)
s = s.replace(
'#include "cub/device/device_radix_sort.cuh"',
"#include <hipcub/hipcub.hpp>\n#include <hipcub/backend/rocprim/device/device_radix_sort.hpp>",
)
s = s.replace(
"#include <cub/device/device_segmented_radix_sort.cuh>",
"#include <hipcub/backend/rocprim/device/device_segmented_radix_sort.hpp>",
)
s = s.replace(
"#include <cub/device/device_reduce.cuh>", "#include <hipcub/backend/rocprim/device/device_reduce.hpp>"
)
s = s.replace(
"#include <cub/device/device_run_length_encode.cuh>",
"#include <hipcub/backend/rocprim/device/device_run_length_encode.hpp>",
)
s = s.replace("#include <cub/device/device_scan.cuh>", "#include <hipcub/backend/rocprim/device/device_scan.hpp>")
s = s.replace(
"#include <cub/iterator/counting_input_iterator.cuh>",
"#include <hipcub/backend/rocprim/iterator/counting_input_iterator.hpp>",
)
s = s.replace(
"#include <cub/iterator/discard_output_iterator.cuh>",
"#include <hipcub/backend/rocprim/iterator/discard_output_iterator.hpp>",
)
s = s.replace("#include <cub/util_allocator.cuh>", "#include <hipcub/util_allocator.hpp>")
s = s.replace('#include "cub/util_allocator.cuh"', "#include <hipcub/util_allocator.hpp>")
s = s.replace("#include <cub/util_type.cuh>", "#include <hipcub/backend/rocprim/util_type.hpp>")
s = s.replace('#include "cub/util_type.cuh"', "#include <hipcub/backend/rocprim/util_type.hpp>")
s = s.replace("#include <cub/device/device_partition.cuh>", "#include <hipcub/device/device_partition.hpp>")
s = s.replace("#include <math_constants.h>", "#include <limits>")
s = s.replace("#include <library_types.h>", "") # Doesn't exist
s = s.replace("typedef half MappedType", "typedef __half MappedType")
# CUBLAS -> HIPBLAS
# Note: We do not use the hipblas marshalling interfaces; use rocblas instead.
# s = s.replace('CUBLAS', 'HIPBLAS')
# s = s.replace('Cublas', 'Hipblas')
# s = s.replace('cublas', 'hipblas')
# CUBLAS -> ROCBLAS
s = s.replace("CUBLAS", "ROCBLAS")
s = s.replace("Cublas", "Rocblas")
s = s.replace("cublas", "rocblas")
# Undefined ROCMRT constants -> std::numeric_limits
s = s.replace("ROCMRT_INF_F", "std::numeric_limits<float>::infinity()")
# HIPBLAS -> rocblas
s = s.replace("HIPBLAS_R_16F", "rocblas_datatype_f16_r")
s = s.replace("HIPBLAS_R_32F", "rocblas_datatype_f32_r")
s = s.replace("ROCBLAS_GEMM_DEFAULT_TENSOR_OP", "rocblas_gemm_algo_standard")
s = s.replace("ROCBLAS_TENSOR_OP_MATH", "0 /* CUBLAS_TENSOR_OP_MATH is deprecated */")
# compatible layer
s = s.replace("rocblas_gemm_strided_batched_ex", "_compat_rocblas_gemm_strided_batched_ex")
s = s.replace("RocblasMathModeSetter", "CompatRocblasMathModeSetter")
# CURAND -> HIPRAND
s = s.replace("CURAND", "HIPRAND")
s = s.replace("Curand", "Hiprand")
s = s.replace("curand", "hiprand")
# NCCL -> RCCL
# s = s.replace('NCCL_CALL', 'RCCL_CALL')
s = s.replace("#include <nccl.h>", "#include <rccl/rccl.h>")
# CUDNN -> MIOpen
s = s.replace("CUDNN", "MIOPEN")
s = s.replace("Cudnn", "Miopen")
s = s.replace("cudnn", "miopen")
# hipify seems to have a bug for MIOpen, cudnn.h -> hipDNN.h, cudnn -> hipdnn
s = s.replace("#include <hipDNN.h>", "#include <miopen/miopen.h>")
s = s.replace("hipdnn", "miopen")
s = s.replace("HIPDNN_STATUS_SUCCESS", "miopenStatusSuccess")
s = s.replace("HIPDNN", "MIOPEN")
s = s.replace("MIOPEN_BATCHNORM_SPATIAL", "miopenBNSpatial")
s = s.replace("MIOPEN_BATCHNORM_PER_ACTIVATION", "miopenBNPerActivation")
s = s.replace("MIOPEN_LRN_CROSS_CHANNEL", "miopenLRNCrossChannel")
s = s.replace("MIOPEN_POOLING_MAX", "miopenPoolingMax")
s = s.replace("MIOPEN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING", "miopenPoolingAverageInclusive")
s = s.replace("MIOPEN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING", "miopenPoolingAverage")
# CUSPARSE -> HIPSPARSE
s = s.replace("CUSPARSE", "HIPSPARSE")
# CUFFT -> HIPFFT
s = s.replace("CUFFT", "HIPFFT")
# Undo where above hipify steps went too far.
s = s.replace("id, ROCM", "id, CUDA") # cuda_execution_provider.cc
s = s.replace("ROCM error executing", "HIP error executing")
s = s.replace("ROCM_PINNED", "CUDA_PINNED")
s = s.replace("rocm_err", "hip_err")
s = s.replace("RegisterHipTrainingKernels", "RegisterRocmTrainingKernels")
s = s.replace("ROCM_VERSION", "CUDA_VERSION") # semantically different meanings, cannot hipify
s = s.replace("__ROCM_ARCH__", "__CUDA_ARCH__") # semantically different meanings, cannot hipify
# "std::log" above incorrectly changed "std::logic_error" to "logfic_error"
s = s.replace("logfic_error", "std::logic_error")
# Deletions
s = s.replace('#include "device_atomic_functions.h"', "") # HIP atomics in main hip header already
# Fix warnings due to incorrect header paths, intentionally after all other hipify steps.
s = s.replace("#include <hiprand_kernel.h>", "#include <hiprand/hiprand_kernel.h>")
s = s.replace("#include <rocblas.h>", "#include <rocblas/rocblas.h>")
s = s.replace("#include <hipblas.h>", "#include <hipblas/hipblas.h>")
with open(dst_file_path, "w") as f:
f.write(s)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--hipify_perl", required=True)
parser.add_argument("--output", "-o", help="output file")
parser.add_argument("src", help="src")
args = parser.parse_args()
hipify(args.hipify_perl, args.src, args.output)
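# Illustrative note (not part of the original script); a typical invocation, with paths
# as placeholders:
#   python tools/ci_build/amd_hipify.py --hipify_perl /opt/rocm/bin/hipify-perl \
#       -o <build_dir>/rocm/some_kernel.cc <ort_root>/onnxruntime/.../cuda/some_kernel.cc
# hipify-perl performs the generic CUDA -> HIP translation; the string replacements above
# then adjust the result to ORT's ROCm conventions (rocblas instead of hipblas, MIOpen
# instead of cuDNN, RCCL instead of NCCL, and so on).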
| 8,817 | 46.664865 | 120 | py |
onnxruntime | onnxruntime-main/tools/ci_build/gen_def.py | #!/usr/bin/python3
import argparse
import os
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--src_root", required=True, help="input symbol file")
parser.add_argument("--output", required=True, help="output file")
parser.add_argument("--output_source", required=True, help="output file")
parser.add_argument("--version_file", required=True, help="VERSION_NUMBER file")
parser.add_argument("--style", required=True, choices=["gcc", "vc", "xcode"])
parser.add_argument("--config", required=True, nargs="+")
return parser.parse_args()
args = parse_arguments()
print("Generating symbol file for %s" % str(args.config))
with open(args.version_file) as f:
VERSION_STRING = f.read().strip()
print("VERSION:%s" % VERSION_STRING)
symbols = set()
for c in args.config:
file_name = os.path.join(args.src_root, "core", "providers", c, "symbols.txt")
with open(file_name) as file:
for line in file:
line = line.strip() # noqa: PLW2901
if line in symbols:
print("dup symbol: %s", line)
exit(-1)
symbols.add(line)
symbols = sorted(symbols)
symbol_index = 1
with open(args.output, "w") as file:
if args.style == "vc":
file.write("LIBRARY\n")
file.write("EXPORTS\n")
elif args.style == "xcode":
pass # xcode compile don't has any header.
else:
file.write("VERS_%s {\n" % VERSION_STRING)
file.write(" global:\n")
for symbol in symbols:
if args.style == "vc":
file.write(" %s @%d\n" % (symbol, symbol_index))
elif args.style == "xcode":
file.write("_%s\n" % symbol)
else:
file.write(" %s;\n" % symbol)
symbol_index += 1
if args.style == "gcc":
file.write(" local:\n")
file.write(" *;\n")
file.write("}; \n")
with open(args.output_source, "w") as file:
file.write("#include <onnxruntime_c_api.h>\n")
for c in args.config:
# WinML adapter should not be exported in platforms other than Windows.
# Exporting OrtGetWinMLAdapter is exported without issues using .def file when compiling for Windows
# so it isn't necessary to include it in generated_source.c
# external symbols are removed, xnnpack ep will be created via the standard ORT API.
# https://github.com/microsoft/onnxruntime/pull/11798
if c not in ("vitisai", "winml", "cuda", "rocm", "migraphx", "qnn", "snpe", "xnnpack", "cann", "dnnl"):
file.write(f"#include <core/providers/{c}/{c}_provider_factory.h>\n")
file.write("void* GetFunctionEntryByName(const char* name){\n")
for symbol in symbols:
if symbol != "OrtGetWinMLAdapter":
file.write(f'if(strcmp(name,"{symbol}") ==0) return (void*)&{symbol};\n')
file.write("return NULL;\n")
file.write("}\n")
| 2,917 | 36.410256 | 111 | py |
onnxruntime | onnxruntime-main/tools/ci_build/reduce_op_kernels.py | # !/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import io
import re
import shutil
import sys
import typing
from pathlib import Path
import op_registration_utils
from logger import get_logger
# directory containing the reduced op files, relative to the build directory
OP_REDUCTION_DIR = "op_reduction.generated"
# add the path to tools/python so we can import the config parsing and type reduction processing
SCRIPT_DIR = Path(__file__).parent.resolve()
ORT_ROOT = SCRIPT_DIR.parents[1]
sys.path.append(str(ORT_ROOT / "tools" / "python"))
from util import parse_config # noqa: E402
from util.ort_format_model.operator_type_usage_processors import OpTypeImplFilterInterface # noqa: E402
log = get_logger("reduce_op_kernels")
def _adapt_filters_for_extended_minimal_build(
base_required_ops: typing.Optional[dict], base_op_type_impl_filter: typing.Optional[OpTypeImplFilterInterface]
):
"""
Adapts the values returned by parse_config() for an extended minimal build or higher.
In particular:
- Includes ONNX ops needed by layout transformation
- Includes MS ops needed by NHWC optimizer
"""
# graph transformations in an extended minimal build require certain ops to be available
extended_minimal_build_required_op_ids = set() # set of (domain, optype, opset)
with open(
ORT_ROOT / "onnxruntime/core/optimizer/layout_transformation/layout_transformation_potentially_added_ops.h",
) as f:
region_boundary_pattern = re.compile(r"@@region_(begin|end)\(extended_minimal_build_required_kernels\)@@")
op_id_pattern = re.compile(
r'OpIdentifierWithStringViews{(?P<domain>\w+),\s+"(?P<optype>\w+)",\s+(?P<opset>\d+)}'
)
in_region = False
for line in f:
region_boundary_match = region_boundary_pattern.search(line)
if region_boundary_match:
in_region = region_boundary_match.group(1) == "begin"
continue
if not in_region:
continue
op_id_match = op_id_pattern.search(line)
if op_id_match:
domain = op_registration_utils.map_ort_constant_to_domain(
op_id_match.group("domain"), allow_unknown_constant=False
)
optype = op_id_match.group("optype")
opset = int(op_id_match.group("opset"))
extended_minimal_build_required_op_ids.add((domain, optype, opset))
adapted_required_ops = None
if base_required_ops is not None:
adapted_required_ops = base_required_ops.copy()
for domain, optype, opset in extended_minimal_build_required_op_ids:
adapted_required_ops.setdefault(domain, dict()).setdefault(opset, set()).add(optype)
adapted_op_type_impl_filter = None
if base_op_type_impl_filter is not None:
class _AdaptedFilter(OpTypeImplFilterInterface):
def __init__(
self,
filter_to_adapt: OpTypeImplFilterInterface,
required_domain_and_optypes: typing.Set[typing.Tuple[str, str]],
):
self.filter_to_adapt = filter_to_adapt
self.required_domain_and_optypes = required_domain_and_optypes
def is_typed_registration_needed(self, domain: str, optype: str, type_registration_str: str):
# Always require registration for ops in self.required_domain_and_optypes.
if (domain, optype) in self.required_domain_and_optypes:
return True
return self.filter_to_adapt.is_typed_registration_needed(domain, optype, type_registration_str)
def get_cpp_entries(self):
# The required types for ops in self.required_optypes must be specified in the C++ implementation.
# Doing that also accounts for globally allowed types.
# We don't need to do anything special with the allowed type overrides here.
return self.filter_to_adapt.get_cpp_entries()
adapted_op_type_impl_filter = _AdaptedFilter(
base_op_type_impl_filter,
{(domain, optype) for (domain, optype, opset) in extended_minimal_build_required_op_ids},
)
return (adapted_required_ops, adapted_op_type_impl_filter)
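# Illustrative note (not part of the original script): `required_ops`, as returned by
# parse_config and adapted above, is a nested mapping of domain -> opset -> set of op
# types, e.g. (hypothetical values):
#   {"ai.onnx": {13: {"Add", "MatMul"}, 14: {"Relu"}}, "com.microsoft": {1: {"FusedGemm"}}}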
class _ExcludingRegistrationProcessor(op_registration_utils.RegistrationProcessor):
"""Registration processor that excludes registrations and writes the result to an output file."""
def __init__(
self,
required_ops: typing.Optional[dict],
op_type_impl_filter: typing.Optional[OpTypeImplFilterInterface],
output_file: io.TextIOWrapper,
):
self._required_ops = required_ops
self._op_type_impl_filter = op_type_impl_filter
self._output_file = output_file
def _is_op_required(
self, domain: str, operator: str, start_version: int, end_version: typing.Optional[int]
) -> bool:
"""See if an op is required."""
if self._required_ops is None:
return True
if domain not in self._required_ops:
return False
for opset in self._required_ops[domain]:
if opset >= start_version and (end_version is None or opset <= end_version):
if operator in self._required_ops[domain][opset]:
return True
return False
def process_registration(
self,
lines: typing.List[str],
constant_for_domain: str,
operator: str,
start_version: int,
end_version: typing.Optional[int] = None,
type: typing.Optional[str] = None,
):
registration_identifier = "{}:{}({}){}".format(
constant_for_domain, operator, start_version, f"<{type}>" if type else ""
)
# convert from the ORT constant name to the domain string used in the config
domain = op_registration_utils.map_ort_constant_to_domain(constant_for_domain, allow_unknown_constant=False)
exclude = False
reason = ""
if domain is not None:
if not self._is_op_required(domain, operator, start_version, end_version):
exclude = True
reason = "Entire op is not required."
if not exclude and type is not None and self._op_type_impl_filter is not None:
if not self._op_type_impl_filter.is_typed_registration_needed(domain, operator, type):
exclude = True
reason = "Specific typed registration is not required."
else:
log.warning(f"Keeping {registration_identifier} registration from unknown domain: {constant_for_domain}")
if exclude:
log.info(f"Disabling {registration_identifier} registration: {reason}")
for line in lines:
self._output_file.write("// " + line)
# edge case of last entry in table where we still need the terminating }; to not be commented out
if lines[-1].rstrip().endswith("};"):
self._output_file.write("};\n")
else:
for line in lines:
self._output_file.write(line)
def process_other_line(self, line):
self._output_file.write(line)
def ok(self):
return True
def _get_op_reduction_root(build_dir: Path):
"""
Return the op reduction root directory which is a subdirectory of `build_dir`.
"""
return Path(build_dir, OP_REDUCTION_DIR)
def _get_op_reduction_file_path(ort_root: Path, build_dir: Path, original_path: Path):
"""
Return the op reduction file path corresponding to `original_path`.
Op reduction files are in the op reduction root but otherwise share the same components of `original_path`
relative to `ort_root`.
"""
return _get_op_reduction_root(build_dir) / original_path.relative_to(ort_root)
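# Illustrative note (not part of the original script); an example mapping with
# placeholder paths: for build_dir <build>/Linux/Release, the file
#   <ort_root>/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
# gets its reduced counterpart written to
#   <build>/Linux/Release/op_reduction.generated/onnxruntime/core/providers/cpu/cpu_execution_provider.cc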
def _generate_provider_registrations(
ort_root: Path,
build_dir: Path,
use_cuda: bool,
required_ops: typing.Optional[dict],
op_type_impl_filter: typing.Optional[OpTypeImplFilterInterface],
):
"""Generate provider registration files."""
kernel_registration_files = [
Path(f) for f in op_registration_utils.get_kernel_registration_files(str(ort_root), use_cuda)
]
for kernel_registration_file in kernel_registration_files:
if not kernel_registration_file.is_file():
raise ValueError(f"Kernel registration file does not exist: {kernel_registration_file}")
log.info(f"Processing {kernel_registration_file}")
reduced_path = _get_op_reduction_file_path(ort_root, build_dir, kernel_registration_file)
reduced_path.parent.mkdir(parents=True, exist_ok=True)
# read from original and create the reduced kernel def file with commented out lines for any kernels that are
# not required
with open(reduced_path, "w") as file_to_write:
processor = _ExcludingRegistrationProcessor(required_ops, op_type_impl_filter, file_to_write)
op_registration_utils.process_kernel_registration_file(kernel_registration_file, processor)
if not processor.ok():
# error should have already been logged so just exit
sys.exit(-1)
def _generate_type_control_overrides(ort_root: Path, build_dir: Path, cpp_lines: typing.Sequence[str]):
"""
Generate type control overrides. Insert applicable C++ code to specify operator type requirements.
:param ort_root: Root of the ONNX Runtime repository
:param build_dir: Path to the build directory
:param cpp_lines: The C++ code to insert
"""
src = Path(ort_root, "onnxruntime", "core", "providers", "op_kernel_type_control_overrides.inc")
if not src.is_file():
raise ValueError(f"Op kernel type control overrides file does not exist: {src}")
# create a copy of op_kernel_type_control_overrides.inc
target = _get_op_reduction_file_path(ort_root, build_dir, src)
target.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(src, target)
if cpp_lines:
# find the insertion block and replace any existing content in it
inserted = False
with open(src) as input, open(target, "w") as output:
inside_insertion_block = False
for line in input.readlines():
if "@@insertion_point_begin(allowed_types)@@" in line:
inside_insertion_block = True
output.write(line)
[output.write(f"{code_line}\n") for code_line in cpp_lines]
inserted = True
continue
elif inside_insertion_block:
if "@@insertion_point_end(allowed_types)@@" in line:
inside_insertion_block = False
else:
# we ignore any old lines within the insertion block
continue
output.write(line)
if not inserted:
raise RuntimeError(f"Insertion point was not found in {target}")
def reduce_ops(
config_path: str,
build_dir: str,
enable_type_reduction: bool,
use_cuda: bool,
is_extended_minimal_build_or_higher: bool,
):
"""
Reduce op kernel implementations.
:param config_path: Path to configuration file that specifies the ops to include
:param build_dir: Path to the build directory. The op reduction files will be generated under the build directory.
:param enable_type_reduction: Whether per operator type reduction is enabled
:param use_cuda: Whether to reduce op kernels for the CUDA provider
:param is_extended_minimal_build_or_higher: Whether this build has at least the features of an extended minimal
build enabled.
"""
build_dir_path = Path(build_dir).resolve()
build_dir_path.mkdir(parents=True, exist_ok=True)
required_ops, op_type_impl_filter = parse_config(config_path, enable_type_reduction)
if is_extended_minimal_build_or_higher:
required_ops, op_type_impl_filter = _adapt_filters_for_extended_minimal_build(required_ops, op_type_impl_filter)
# delete any existing generated files first
op_reduction_root = _get_op_reduction_root(build_dir_path)
if op_reduction_root.is_dir():
log.info(f"Deleting existing op reduction file root directory: {op_reduction_root}")
shutil.rmtree(op_reduction_root)
_generate_provider_registrations(ORT_ROOT, build_dir_path, use_cuda, required_ops, op_type_impl_filter)
type_control_cpp_code = op_type_impl_filter.get_cpp_entries() if op_type_impl_filter is not None else []
_generate_type_control_overrides(ORT_ROOT, build_dir_path, type_control_cpp_code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Reduces operator kernel implementations in ONNX Runtime. "
"Entire op implementations or op implementations for specific types may be pruned."
)
parser.add_argument(
"config_path",
type=str,
help="Path to configuration file. "
"Create with <ORT root>/tools/python/create_reduced_build_config.py and edit if needed. "
"See https://onnxruntime.ai/docs/reference/operators/reduced-operator-config-file.html for more "
"information.",
)
parser.add_argument(
"--cmake_build_dir",
type=str,
required=True,
help="Path to the build directory. The op reduction files will be generated under the build directory.",
)
parser.add_argument(
"--is_extended_minimal_build_or_higher",
action="store_true",
help="Whether this build has at least the features of an extended minimal build enabled.",
)
parser.add_argument(
"--enable_type_reduction", action="store_true", help="Whether per operator type reduction is enabled."
)
parser.add_argument("--use_cuda", action="store_true", help="Whether to reduce op kernels for the CUDA provider.")
args = parser.parse_args()
reduce_ops(
config_path=args.config_path,
build_dir=args.cmake_build_dir,
enable_type_reduction=args.enable_type_reduction,
use_cuda=args.use_cuda,
is_extended_minimal_build_or_higher=args.is_extended_minimal_build_or_higher,
)
| 14,490 | 39.591036 | 120 | py |
onnxruntime | onnxruntime-main/tools/ci_build/logger.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
def get_logger(name):
logging.basicConfig(format="%(asctime)s %(name)s [%(levelname)s] - %(message)s", level=logging.DEBUG)
return logging.getLogger(name)
| 276 | 24.181818 | 105 | py |
onnxruntime | onnxruntime-main/tools/ci_build/get_docker_image.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import os
import shlex
import shutil
import sys
from pathlib import Path
from logger import get_logger
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
sys.path.append(os.path.join(REPO_DIR, "tools", "python"))
from util import run # noqa: E402
log = get_logger("get_docker_image")
def parse_args():
parser = argparse.ArgumentParser(
description="Build a docker image and push it to a remote Azure Container Registry."
"The content in the remote registry can be used as a cache when we need to build the thing again."
"The user must be logged in to the container registry."
)
parser.add_argument("--dockerfile", default="Dockerfile", help="Path to the Dockerfile.")
parser.add_argument("--context", default=".", help="Path to the build context.")
parser.add_argument(
"--docker-build-args", default="", help="Arguments that will be passed to the 'docker build' command."
)
parser.add_argument(
"--container-registry",
help="The Azure container registry name. If not provided, no container registry will be used.",
)
parser.add_argument("--repository", required=True, help="The image repository name.")
parser.add_argument("--use_imagecache", action="store_true", help="use cached image in pipeline cache")
parser.add_argument("--docker-path", default="docker", help="Path to docker.")
parser.add_argument("--manylinux-src", default="manylinux", help="Path to manylinux src folder")
parser.add_argument(
"--multiple_repos",
action="store_true",
help="used in packaging pipeline, which couldn't use get-docker-images-steps.yml",
)
return parser.parse_args()
def main():
args = parse_args()
log.debug(
"Dockerfile: {}, context: {}, docker build args: '{}'".format(
args.dockerfile, args.context, args.docker_build_args
)
)
use_container_registry = args.container_registry is not None
if not use_container_registry:
log.info("No container registry will be used")
full_image_name = (
f"{args.container_registry}.azurecr.io/{args.repository}:latest"
if use_container_registry
else f"{args.repository}:latest"
)
log.info(f"Image: {full_image_name}")
dst_deps_file = Path(args.context) / "scripts" / "deps.txt"
# The docker file may provide a special deps.txt in its docker context dir and uses that one.
# Otherwise, copy a generic one from this repo's cmake dir.
if not dst_deps_file.exists():
log.info(f"Copy deps.txt to : {dst_deps_file}")
shutil.copyfile(Path(REPO_DIR) / "cmake" / "deps.txt", str(dst_deps_file))
if "manylinux" in args.dockerfile and args.multiple_repos:
manylinux_build_scripts_folder = Path(args.manylinux_src) / "docker" / "build_scripts"
dest = Path(args.context) / "build_scripts"
if dest.exists():
log.info(f"Deleting: {str(dest)}")
shutil.rmtree(str(dest))
shutil.copytree(str(manylinux_build_scripts_folder), str(dest))
src_entrypoint_file = str(Path(args.manylinux_src) / "docker" / "manylinux-entrypoint")
dst_entrypoint_file = str(Path(args.context) / "manylinux-entrypoint")
shutil.copyfile(src_entrypoint_file, dst_entrypoint_file)
shutil.copymode(src_entrypoint_file, dst_entrypoint_file)
run(
"patch",
"-p1",
"-i",
str((Path(SCRIPT_DIR) / "github" / "linux" / "docker" / "manylinux.patch").resolve()),
cwd=str(dest),
)
if use_container_registry:
run(
args.docker_path,
"--log-level",
"error",
"buildx",
"build",
"--push",
"--tag",
full_image_name,
"--cache-from",
full_image_name,
"--build-arg",
"BUILDKIT_INLINE_CACHE=1",
*shlex.split(args.docker_build_args),
"-f",
args.dockerfile,
args.context,
)
elif args.use_imagecache:
log.info("Building image with pipeline cache...")
run(
args.docker_path,
"--log-level",
"error",
"buildx",
"build",
"--tag",
full_image_name,
"--cache-from",
full_image_name,
"--build-arg",
"BUILDKIT_INLINE_CACHE=1",
*shlex.split(args.docker_build_args),
"-f",
args.dockerfile,
args.context,
)
else:
log.info("Building image...")
run(
args.docker_path,
"build",
"--pull",
*shlex.split(args.docker_build_args),
"--tag",
full_image_name,
"--file",
args.dockerfile,
args.context,
)
# tag so we can refer to the image by repository name
run(args.docker_path, "tag", full_image_name, args.repository)
return 0
if __name__ == "__main__":
sys.exit(main())
| 5,336 | 31.542683 | 110 | py |
onnxruntime | onnxruntime-main/tools/ci_build/upload_python_package_to_azure_storage.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import logging
import os
import subprocess
import warnings
log = logging.getLogger("Build")
def parse_nightly_and_local_version_from_whl_name(blob_name):
night_build = "nightly" if blob_name.find(".dev") > 0 else "stable"
start = blob_name.find("+")
if start == -1:
return night_build, None
start = start + 1
end = blob_name.find("-", start)
if end == -1:
return night_build, None
return night_build, blob_name[start:end]
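# Quick illustration with hypothetical wheel names:
#   "onnxruntime_gpu-1.16.0.dev20230801-cp310-cp310-linux_x86_64.whl" -> ("nightly", None)
#   "onnxruntime-1.15.1+cu118-cp38-cp38-win_amd64.whl"                -> ("stable", "cu118")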
def run_subprocess(args, cwd=None):
log.warning(f"Running subprocess in '{cwd or os.getcwd()}'\n{args}")
return subprocess.run(args, cwd=cwd, check=True)
def upload_whl(python_wheel_path, final_storage=False):
storage_account_name = "onnxruntimepackages" if final_storage else "onnxruntimepackagesint"
blob_name = os.path.basename(python_wheel_path)
run_subprocess(["azcopy", "cp", python_wheel_path, f"https://{storage_account_name}.blob.core.windows.net/$web/"])
nightly_build, local_version = parse_nightly_and_local_version_from_whl_name(blob_name)
if local_version:
html_blob_name = f"onnxruntime_{nightly_build}_{local_version}.html"
else:
html_blob_name = f"onnxruntime_{nightly_build}.html"
download_path_to_html = f"./onnxruntime_{nightly_build}.html"
run_subprocess(
[
"azcopy",
"cp",
f"https://{storage_account_name}.blob.core.windows.net/$web/" + html_blob_name,
download_path_to_html,
]
)
blob_name_plus_replaced = blob_name.replace("+", "%2B")
with open(download_path_to_html) as f:
lines = f.read().splitlines()
new_line = '<a href="{blobname}">{blobname}</a><br>'.format(blobname=blob_name_plus_replaced)
if new_line not in lines:
lines.append(new_line)
lines.sort()
with open(download_path_to_html, "w") as f:
for item in lines:
f.write("%s\n" % item)
else:
warnings.warn(f"'{new_line}' exists in {download_path_to_html}. The html file is not updated.")
run_subprocess(
[
"azcopy",
"cp",
download_path_to_html,
f"https://{storage_account_name}.blob.core.windows.net/$web/" + html_blob_name,
"--content-type",
"text/html",
"--overwrite",
"true",
]
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Upload python whl to azure storage.")
parser.add_argument("--python_wheel_path", type=str, help="path to python wheel")
parser.add_argument("--final_storage", action="store_true", help="upload to final storage")
args = parser.parse_args()
upload_whl(args.python_wheel_path, args.final_storage)
| 2,904 | 30.923077 | 118 | py |
onnxruntime | onnxruntime-main/tools/ci_build/op_registration_validator.py | # !/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Validate ORT kernel registrations.
"""
import argparse
import dataclasses
import itertools
import os
import sys
import typing
import op_registration_utils
from logger import get_logger
log = get_logger("op_registration_validator")
# deprecated ops where the last registration should have an end version.
# value for each entry is the opset when it was deprecated. end version of last registration should equal value - 1.
deprecated_ops = {
"kOnnxDomain:Scatter": 11,
"kOnnxDomain:Upsample": 10,
# LayerNormalization, MeanVarianceNormalization and ThresholdedRelu were in contrib ops and incorrectly registered
# using the kOnnxDomain. They became official ONNX operators later and are registered there now. That leaves
# entries in the contrib ops registrations with end versions for when the contrib op was 'deprecated'
# and became an official op.
"kOnnxDomain:LayerNormalization": 17,
"kOnnxDomain:MeanVarianceNormalization": 9,
"kOnnxDomain:ThresholdedRelu": 10,
}
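# As a concrete illustration of the rule encoded above: Scatter was deprecated in opset 11, so its last
# kernel registration is expected to end at opset 10. Ops that are not listed here must instead finish
# with an unversioned (open-ended) registration.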
@dataclasses.dataclass
class RegistrationInfo:
domain: str
operator: str
start_version: int
end_version: typing.Optional[int]
lines: typing.List[str]
def domain_and_op_str(self):
return f"{self.domain}:{self.operator}"
def _log_registration_error(r: RegistrationInfo, message: str):
log.error("Invalid registration for {}. {}\n{}".format(r.domain_and_op_str(), message, "".join(r.lines)))
class RegistrationValidator(op_registration_utils.RegistrationProcessor):
def __init__(self):
self.all_registrations: typing.List[RegistrationInfo] = []
def process_registration(
self,
lines: typing.List[str],
domain: str,
operator: str,
start_version: int,
end_version: typing.Optional[int] = None,
type: typing.Optional[str] = None,
):
self.all_registrations.append(
RegistrationInfo(
domain=domain, operator=operator, start_version=start_version, end_version=end_version, lines=lines
)
)
def ok(self):
num_invalid_registrations = self._validate_all_registrations()
if num_invalid_registrations > 0:
log.error(f"Found {num_invalid_registrations} invalid registration(s).")
return False
return True
def _validate_all_registrations(self) -> int:
"""
Validates all registrations added by `process_registration()` and returns the number of invalid ones.
"""
def registration_info_sort_key(r: RegistrationInfo):
return (
r.domain,
r.operator,
r.start_version,
1 if r.end_version is None else 0, # unspecified end_version > specified end_version
r.end_version,
)
def domain_and_op_key(r: RegistrationInfo):
return (r.domain, r.operator)
sorted_registrations = sorted(self.all_registrations, key=registration_info_sort_key)
num_invalid_registrations = 0
for _, registration_group in itertools.groupby(sorted_registrations, key=domain_and_op_key):
num_invalid_registrations += self._validate_registrations_for_domain_and_op(registration_group)
return num_invalid_registrations
def _validate_registrations_for_domain_and_op(self, registrations: typing.Iterator[RegistrationInfo]) -> int:
"""
Validates registrations in sorted order for a single domain and op and returns the number of invalid ones.
"""
num_invalid_registrations = 0
r = next(registrations, None)
while r is not None:
next_r = next(registrations, None)
if not self._validate_registration(r, next_r):
num_invalid_registrations += 1
r = next_r
return num_invalid_registrations
def _validate_registration(self, r: RegistrationInfo, next_r: typing.Optional[RegistrationInfo]) -> bool:
"""
Validates a registration, `r`, with the next one in sorted order for a single domain and op, `next_r`, and
returns whether it is valid.
"""
if not (r.end_version is None or r.start_version <= r.end_version):
_log_registration_error(
r, f"Start version ({r.start_version}) is greater than end version ({r.end_version})."
)
return False
if next_r is None:
return self._validate_last_registration(r)
# It is valid to match next registration start and end versions exactly.
# This is expected if there are multiple registrations for an opset (e.g., typed registrations).
if (r.start_version, r.end_version) == (next_r.start_version, next_r.end_version):
return True
# This registration has no end version but it should have one if the next registration has different versions.
if r.end_version is None:
_log_registration_error(
r,
f"Registration for opset {r.start_version} has no end version but was superseded by version "
f"{next_r.start_version}.",
)
return False
# This registration's end version is not adjacent to the start version of the next registration.
if r.end_version != next_r.start_version - 1:
_log_registration_error(
r,
f"Registration end version is not adjacent to the next registration's start version. "
f"Current start and end versions: {(r.start_version, r.end_version)}. "
f"Next start and end versions: {(next_r.start_version, next_r.end_version)}.",
)
return False
return True
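    # Illustration of the adjacency rules above with hypothetical (start, end) version pairs for one op:
    #   (7, 9), (10, 12), (13, None) -> valid: every end version is adjacent to the next start version.
    #   (7, 9), (11, None)           -> invalid: the gap between 9 and 11 fails the adjacency check.
    #   (7, None), (13, None)        -> invalid: the first registration should have ended at opset 12.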
def _validate_last_registration(self, last_r: RegistrationInfo) -> bool:
"""
Validates the last registration in sorted order for a single domain and op and returns whether it is valid.
"""
# make sure we have an unversioned last entry for each operator unless it's deprecated
# TODO If the operator is deprecated, validation is more lax. I.e., it doesn't require a versioned registration.
# This could be tightened up but we would need to handle the deprecated contrib ops registered in the ONNX
# domain that have newer registrations in a non-contrib op file differently. They should only be considered
# deprecated as contrib ops.
domain_and_op_str = last_r.domain_and_op_str()
deprecation_version = deprecated_ops.get(domain_and_op_str, None)
allow_missing_unversioned_registration = (
deprecation_version is not None and last_r.end_version == deprecation_version - 1
)
# special handling for ArgMin/ArgMax, which CUDA EP doesn't yet support for opset 12+
# TODO remove once CUDA EP supports ArgMin/ArgMax for opset 12+
ops_with_incomplete_support = ["kOnnxDomain:ArgMin", "kOnnxDomain:ArgMax"]
if domain_and_op_str in ops_with_incomplete_support:
log.warning(
f"Allowing missing unversioned registration for op with incomplete support: {domain_and_op_str}."
)
allow_missing_unversioned_registration = True
if last_r.end_version is not None and not allow_missing_unversioned_registration:
log.error(f"Missing unversioned registration for {domain_and_op_str}.")
return False
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Script to validate operator kernel registrations.")
parser.add_argument(
"--ort_root",
type=str,
help="Path to ONNXRuntime repository root. Inferred from the location of this script if not provided.",
)
args = parser.parse_args()
ort_root = os.path.abspath(args.ort_root) if args.ort_root else None
include_cuda = True # validate CPU and CUDA EP registrations
registration_files = op_registration_utils.get_kernel_registration_files(ort_root, include_cuda)
def validate_registration_file(file: str) -> bool:
log.info(f"Processing {file}")
processor = RegistrationValidator()
op_registration_utils.process_kernel_registration_file(file, processor)
return processor.ok()
validation_successful = all(
# Validate each file first by storing the validation results in a list.
# Otherwise, all() will exit early when it encounters the first invalid file.
list(map(validate_registration_file, registration_files))
)
log.info(f"Op kernel registration validation {'succeeded' if validation_successful else 'failed'}.")
sys.exit(0 if validation_successful else 1)
| 8,898 | 39.085586 | 120 | py |
onnxruntime | onnxruntime-main/tools/ci_build/coverage.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
# This script generates test code coverage for Android.
# The prerequistes:
# 1. The Onnxruntime build with coverage option to compile/link the source files using --coverage optoin
# 2. The tests are run on the target emulator and *.gcda files are available on the emulator
# 3. The emulator which ran tests must be running. Otherwise this script will fail
import argparse
import os
import sys
from build import run_subprocess
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
sys.path.append(os.path.join(REPO_DIR, "tools", "python"))
import util.android as android # noqa: E402
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--build_dir", required=True, help="Path to the build directory.")
parser.add_argument(
"--config",
default="Debug",
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration(s) to run code coverage.",
)
parser.add_argument("--android_sdk_path", required=True, help="The Android SDK root.")
return parser.parse_args()
def main():
args = parse_arguments()
sdk_tool_paths = android.get_sdk_tool_paths(args.android_sdk_path)
def adb_pull(src, dest, **kwargs):
return run_subprocess([sdk_tool_paths.adb, "pull", src, dest], **kwargs)
def adb_shell(*args, **kwargs):
return run_subprocess([sdk_tool_paths.adb, "shell", *args], **kwargs)
script_dir = os.path.realpath(os.path.dirname(__file__))
source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))
cwd = os.path.abspath(os.path.join(args.build_dir, args.config))
adb_shell("cd /data/local/tmp && tar -zcf gcda_files.tar.gz *.dir")
adb_pull("/data/local/tmp/gcda_files.tar.gz", cwd)
os.chdir(cwd)
run_subprocess("tar -zxf gcda_files.tar.gz -C CMakeFiles".split(" "))
cmd = ["gcovr", "-s", "-r"]
cmd.append(os.path.join(source_dir, "onnxruntime"))
cmd.extend([".", "-o"])
cmd.append(os.path.join(cwd, "coverage_rpt.txt"))
run_subprocess(cmd, cwd=os.path.join(cwd, "CMakeFiles"))
if __name__ == "__main__":
main()
| 2,312 | 34.584615 | 108 | py |
onnxruntime | onnxruntime-main/tools/ci_build/replace_urls_in_deps.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# This file replaces https URLs in deps.txt to local file paths. It runs after we download the dependencies from Azure
# DevOps Artifacts
import argparse
import csv
import os
from dataclasses import dataclass
from pathlib import Path
@dataclass(frozen=True)
class Dep:
name: str
url: str
sha1_hash: str
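# Each non-comment row of cmake/deps.txt has the form "name;url;sha1_hash", for example (hypothetical entry):
#   abseil_cpp;https://github.com/abseil/abseil-cpp/archive/refs/tags/20230125.3.zip;<sha1>
# main() below rewrites the https URL portion into a local path under the downloaded deps directory while
# keeping the name and hash untouched.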
def parse_arguments():
parser = argparse.ArgumentParser()
# The directory that contains downloaded zip files
parser.add_argument("--new_dir", required=False)
return parser.parse_args()
def main():
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) # noqa: N806
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..")) # noqa: N806
args = parse_arguments()
new_dir = None
if args.new_dir:
new_dir = Path(args.new_dir)
else:
BUILD_BINARIESDIRECTORY = os.environ.get("BUILD_BINARIESDIRECTORY") # noqa: N806
if BUILD_BINARIESDIRECTORY is None:
raise NameError("Please specify --new_dir or set the env var BUILD_BINARIESDIRECTORY")
new_dir = Path(BUILD_BINARIESDIRECTORY) / "deps"
# Here we intentionally do not check if new_dir exists, because it might be used in a docker container instead.
deps = []
csv_file_path = Path(REPO_DIR) / "cmake" / "deps.txt"
# Read the whole file into memory first
with csv_file_path.open("r", encoding="utf-8") as f:
depfile_reader = csv.reader(f, delimiter=";")
for row in depfile_reader:
if len(row) != 3:
continue
# Lines start with "#" are comments
if row[0].startswith("#"):
continue
deps.append(Dep(row[0], row[1], row[2]))
# Write updated content back
with csv_file_path.open("w", newline="", encoding="utf-8") as f:
depfile_writer = csv.writer(f, delimiter=";")
for dep in deps:
if dep.url.startswith("https://"):
new_url = new_dir / dep.url[8:]
depfile_writer.writerow([dep.name, new_url.as_posix(), dep.sha1_hash])
else:
# Write the original thing back
depfile_writer.writerow([dep.name, dep.url, dep.sha1_hash])
if __name__ == "__main__":
main()
| 2,368 | 30.586667 | 118 | py |
onnxruntime | onnxruntime-main/tools/ci_build/__init__.py | 0 | 0 | 0 | py |
|
onnxruntime | onnxruntime-main/tools/ci_build/compile_triton.py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
import argparse
import importlib.util
import os
import shutil
import triton
def compile(function_table, out_dir):
def compile_one(func, sig, **kwargs):
ret = triton.compile(func, signature=sig, **kwargs)
return ret
metadata = []
for func_desc in function_table:
name = func_desc["name"]
group = func_desc["group"]
sig = func_desc["sig"]
func = func_desc["func"]
kwargs = func_desc["kwargs"]
# print("compile func: ", func_desc)
ret = compile_one(func, sig, **kwargs)
compile_res = {}
compile_res["name"] = name
compile_res["group"] = group
compile_res["func_name"] = ret.metadata["name"]
compile_res["num_warps"] = ret.metadata["num_warps"]
compile_res["shared"] = ret.metadata["shared"]
if "constants" in kwargs:
compile_res["constants"] = kwargs["constants"]
# move tmp kernel file into current dir
if "hsaco_path" in ret.asm and os.path.exists(ret.asm["hsaco_path"]):
# is rocm
lib_name = f"{name}.hsaco"
shutil.copyfile(ret.asm["hsaco_path"], f"{out_dir}/{lib_name}")
elif "cubin" in ret.asm:
# is cuda
lib_name = f"{name}.cubin"
# need to write cubin into file
with open(f"{out_dir}/{lib_name}", "wb") as fp:
fp.write(ret.asm["cubin"])
else:
raise Exception("not find rocm or cuda compiled kernel")
compile_res["lib_file"] = lib_name
metadata.append(compile_res)
return metadata
def convert_lib_to_obj(lib_file, out_dir):
obj_file = lib_file.split(".")[0] + ".o"
command = f"cd {out_dir}; objcopy -I binary -O elf64-x86-64 -B i386:x86-64 {lib_file} {obj_file}; cd -"
ret = os.system(command)
if ret != 0:
raise Exception(f"exec convert command: {command} failed.")
    # check that the output file exists
    if not os.path.exists(f"{out_dir}/{obj_file}"):
        raise Exception(f"the output file does not exist after exec command: {command}")
return obj_file
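# Note on the objcopy step above: the binary is embedded under symbols derived from the input file name,
# so a kernel library named "softmax_fp32.hsaco" (hypothetical) exposes _binary_softmax_fp32_hsaco_start/_end
# symbols. That is why convert_and_save() below builds the "_binary_{lib_name}_start" string from the file
# name with "." replaced by "_".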
def archive_obj_files(obj_files, out_dir, out_obj_file):
obj_files = " ".join(obj_files)
command = f"cd {out_dir}; ar rcs {out_obj_file} {obj_files}; cd -"
ret = os.system(command)
if ret != 0:
raise Exception(f"exec convert command: {command} failed.")
    # check that the output file exists
    if not os.path.exists(f"{out_dir}/{out_obj_file}"):
        raise Exception(f"the output file does not exist after exec command: {command}")
def convert_and_save(metadata, header_file, out_dir, out_obj_file):
c_metadata = []
binary_files = []
for m in metadata:
meta_ele = []
obj_file = convert_lib_to_obj(m["lib_file"], out_dir)
binary_files.append(obj_file)
lib_name = m["lib_file"].replace(".", "_")
meta_ele.append(f'"_binary_{lib_name}_start"')
meta_ele.append(f"\"{m['func_name']}\"")
meta_ele.append(f"\"{m['group']}\"")
meta_ele.append(f"\"{m['name']}\"")
meta_ele.append(str(m["num_warps"]))
meta_ele.append(str(m["shared"]))
# convert constants
constants = []
for k, v in m["constants"].items():
constants.append(f'{{ "{k}", {str(v)}}}')
meta_ele.append(f"{{ { ', '.join(constants) } }}")
c_metadata.append(f"{{ { ', '.join(meta_ele) } }}")
archive_obj_files(binary_files, out_dir, out_obj_file)
code = f"""
#include <unordered_map>
struct _TritonKernelInfo {{
const char* name_start;
const char* func_name;
const char* group_name;
const char* name;
int num_warps;
int shared;
std::unordered_map<std::string, int> constants;
}};
const _TritonKernelInfo kernel_infos[] = {{
{ ', '.join(c_metadata) },
}};
"""
with open(header_file, "w") as fp:
fp.write(code)
def main(args):
out_obj_file = args.obj_file
out_dir = os.path.dirname(out_obj_file)
out_obj_file = os.path.basename(out_obj_file)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
metadata = []
print("[triton kernel] start compile triton kernel.")
for i, f in enumerate(args.script_files):
# import module in f, and call function
spec = importlib.util.spec_from_file_location(f"module_{i}", f)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
func_tb = module.get_function_table()
m = compile(func_tb, out_dir)
metadata.extend(m)
print("[triton kernel] compile triton kernel done.")
# save metadata into header file
convert_and_save(metadata, args.header, out_dir, out_obj_file)
print("[triton kernel] save into file done.")
def get_arges():
parser = argparse.ArgumentParser(description="PyTorch Template Finetune Example")
parser.add_argument(
"--header", type=str, default="triton_kernel_infos.h", help="the header file that should be generated."
)
parser.add_argument("--ort_root", type=str, default="onnxruntime", help="the root dir of onnxruntime.")
parser.add_argument("--script_files", type=str, nargs="+", help="the root dir of onnxruntime.")
parser.add_argument("--obj_file", type=str, default="triton_kernel_infos.a", help="output target object files.")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_arges()
main(args)
| 5,675 | 31.434286 | 116 | py |
onnxruntime | onnxruntime-main/tools/ci_build/update_tsaoptions.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import json
import os
from pathlib import Path
SCRIPT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
REPO_DIR = SCRIPT_DIR.parent.parent
with (REPO_DIR / ".config" / "tsaoptions.json").open() as f:
data = json.load(f)
buildNumber = os.getenv("BUILD_BUILDNUMBER") # noqa: N816
if buildNumber is not None:
data["buildNumber"] = buildNumber
with (REPO_DIR / ".config" / "tsaoptions.json").open(mode="w") as f:
json.dump(data, f)
| 547 | 26.4 | 68 | py |
onnxruntime | onnxruntime-main/tools/ci_build/clean_docker_image_cache.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import collections
import datetime
import json
import os
import re
import sys
import tempfile
from logger import get_logger
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
sys.path.append(os.path.join(REPO_DIR, "tools", "python"))
from util import run # noqa: E402
log = get_logger("clean_docker_image_cache")
def parse_args():
parser = argparse.ArgumentParser(
description="Cleans the docker image cache container registry. "
"This assumes a fairly specific setup - an Azure container registry "
"and a storage account that receives "
"ContainerRegistryRepositoryEvents logs from that registry. "
"The logs are searched in order to determine whether images should be "
"retained or removed. "
"For an image to be retained, it must have been accessed at least N "
"times (specified by --cache-min-access-count) over the past K days "
"(specified by --cache-history-days)."
)
parser.add_argument("--container-registry", required=True, help="The container registry name.")
parser.add_argument("--log-storage-account", required=True, help="The storage account name.")
parser.add_argument("--log-storage-account-container", required=True, help="The storage account container name.")
parser.add_argument(
"--log-storage-path-pattern", default="*.json", help="The log path pattern in the storage account container."
)
parser.add_argument("--cache-history-days", type=int, default=7, help="The length of the cache history in days.")
parser.add_argument(
"--cache-min-access-count", type=int, default=1, help="The minimum access count over the cache history."
)
parser.add_argument("--dry-run", action="store_true", help="Do a dry-run and do not remove any images.")
parser.add_argument("--az-path", default="az", help="Path to the az client.")
return parser.parse_args()
def az(*args, parse_output=True, az_path):
proc = run(az_path, *args, "--output", "json", capture_stdout=parse_output)
if parse_output:
return json.loads(proc.stdout.decode())
return None
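# For instance (hypothetical arguments), az("acr", "repository", "list", "--name", "myregistry", az_path="az")
# runs `az acr repository list --name myregistry --output json` and returns the parsed JSON output.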
def download_logs(storage_account, container, log_path_pattern, target_dir, az_path):
log_paths = az(
"storage",
"blob",
"download-batch",
"--destination",
target_dir,
"--source",
container,
"--account-name",
storage_account,
"--pattern",
log_path_pattern,
az_path=az_path,
)
return [os.path.join(target_dir, log_path) for log_path in log_paths]
ImageInfo = collections.namedtuple("ImageInfo", ["repository", "digest"])
def get_image_name(image_info):
return f"{image_info.repository}@{image_info.digest}"
timestamp_pattern = re.compile(
r"^(?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+)T(?P<hour>\d+):(?P<minute>\d+):(?P<second>\d+)"
)
def parse_timestamp(timestamp_str):
match = timestamp_pattern.match(timestamp_str)
if match is None:
return None
return datetime.datetime(
year=int(match["year"]),
month=int(match["month"]),
day=int(match["day"]),
hour=int(match["hour"]),
minute=int(match["minute"]),
second=int(match["second"]),
tzinfo=datetime.timezone.utc,
)
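# For reference: a log timestamp such as "2023-08-01T12:34:56.789Z" (hypothetical) parses into a
# timezone-aware datetime(2023, 8, 1, 12, 34, 56, tzinfo=UTC); strings that do not match the pattern
# return None.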
def parse_log_line(line, min_datetime):
entry = json.loads(line)
def check_time(value):
timestamp = parse_timestamp(value)
return timestamp is not None and timestamp >= min_datetime
for field_name, expected_value_or_checker in [
("category", "ContainerRegistryRepositoryEvents"),
("operationName", lambda value: value in ["Pull", "Push"]),
("resultType", "HttpStatusCode"),
("resultDescription", lambda value: value in ["200", "201"]),
("time", check_time),
]:
value = entry.get(field_name, "")
if callable(expected_value_or_checker):
if not expected_value_or_checker(value):
return None
else:
if value != expected_value_or_checker:
return None
props = entry.get("properties", {})
repo, digest = props.get("repository"), props.get("digest")
if repo is None or digest is None:
return None
return ImageInfo(repo, digest)
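# A minimal sketch of a log line that passes every check above (all field values are illustrative):
#
#   {"category": "ContainerRegistryRepositoryEvents", "operationName": "Pull",
#    "resultType": "HttpStatusCode", "resultDescription": "200", "time": "2023-08-01T12:34:56Z",
#    "properties": {"repository": "onnxruntimebuildcache", "digest": "sha256:abc123..."}}
#
# Assuming the timestamp falls within the cache history window, this yields
# ImageInfo("onnxruntimebuildcache", "sha256:abc123...").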
def get_valid_images_from_logs(log_paths, min_datetime, min_access_count):
image_counts = dict() # dict of {ImageInfo -> count}
for log_path in log_paths:
log.debug(f"Processing log file: {log_path}")
with open(log_path) as log_file:
for line in log_file:
image_info = parse_log_line(line, min_datetime)
if image_info is not None:
image_counts[image_info] = image_counts.get(image_info, 0) + 1
return {image for image, count in image_counts.items() if count >= min_access_count}
def get_registry_images(container_registry, az_path):
registry_images = set() # set of ImageInfo
repositories = az("acr", "repository", "list", "--name", container_registry, az_path=az_path)
for repository in repositories:
digests = az(
"acr",
"repository",
"show-manifests",
"--repository",
repository,
"--name",
container_registry,
"--query",
"[*].digest",
az_path=az_path,
)
registry_images.update([ImageInfo(repository, digest) for digest in digests])
return registry_images
def clean_images(container_registry, image_names, az_path):
for image_name in image_names:
az(
"acr",
"repository",
"delete",
"--name",
container_registry,
"--image",
image_name,
"--yes",
az_path=az_path,
parse_output=False,
)
# Note:
# the log download and parsing could be replaced by a log analytics query
"""
let cache_history = 7d;
let cache_min_access_count = 1;
ContainerRegistryRepositoryEvents
| where TimeGenerated >= ago(cache_history)
| where OperationName in ("Pull", "Push")
| where ResultDescription in ("200", "201")
| summarize AccessCount = count() by Repository, Digest
| where AccessCount >= cache_min_access_count
| project Repository, Digest
"""
# need to figure out how run the query the programmatically though
def main():
args = parse_args()
valid_images = set()
with tempfile.TemporaryDirectory() as tmp_dir:
log_paths = download_logs(
args.log_storage_account,
args.log_storage_account_container,
args.log_storage_path_pattern,
tmp_dir,
args.az_path,
)
cache_history = datetime.timedelta(days=args.cache_history_days)
min_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) - cache_history
valid_images = get_valid_images_from_logs(log_paths, min_timestamp, args.cache_min_access_count)
all_images = get_registry_images(args.container_registry, args.az_path)
def sorted_image_names(image_infos):
return sorted([get_image_name(image_info) for image_info in image_infos])
log.debug("All images:\n{}".format("\n".join(sorted_image_names(all_images))))
log.debug("Valid images:\n{}".format("\n".join(sorted_image_names(valid_images))))
images_to_clean = all_images - valid_images
image_names_to_clean = sorted_image_names(images_to_clean)
log.info("Images to clean:\n{}".format("\n".join(image_names_to_clean)))
if args.dry_run:
log.info("Dry run, no images will be cleaned.")
return 0
clean_images(args.container_registry, image_names_to_clean, args.az_path)
return 0
if __name__ == "__main__":
sys.exit(main())
| 8,016 | 29.953668 | 117 | py |
onnxruntime | onnxruntime-main/tools/ci_build/build.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import contextlib
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
from pathlib import Path
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
sys.path.insert(0, os.path.join(REPO_DIR, "tools", "python"))
import util.android as android # noqa: E402
from util import get_logger, is_linux, is_macOS, is_windows, run # noqa: E402
log = get_logger("build")
class BaseError(Exception):
"""Base class for errors originating from build.py."""
pass
class BuildError(BaseError):
"""Error from running build steps."""
def __init__(self, *messages):
super().__init__("\n".join(messages))
class UsageError(BaseError):
"""Usage related error."""
def __init__(self, message):
super().__init__(message)
def _check_python_version():
required_minor_version = 7
if (sys.version_info.major, sys.version_info.minor) < (3, required_minor_version):
raise UsageError(
f"Invalid Python version. At least Python 3.{required_minor_version} is required. "
f"Actual Python version: {sys.version}"
)
def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ["true", "false"]:
raise ValueError("Need bool; got %r" % s)
return {"true": True, "false": False}[s.lower()]
_check_python_version()
def _openvino_verify_device_type(device_read):
choices = ["CPU_FP32", "CPU_FP16", "GPU_FP32", "GPU_FP16", "VPUX_FP16", "VPUX_U8"]
choices1 = [
"CPU_FP32_NO_PARTITION",
"CPU_FP16_NO_PARTITION",
"GPU_FP32_NO_PARTITION",
"GPU_FP16_NO_PARTITION",
"VPUX_FP16_NO_PARTITION",
"VPUX_U8_NO_PARTITION",
]
status_hetero = True
res = False
if device_read in choices:
res = True
elif device_read in choices1:
res = True
elif device_read.startswith("HETERO:") or device_read.startswith("MULTI:") or device_read.startswith("AUTO:"):
res = True
comma_separated_devices = device_read.split(":")
comma_separated_devices = comma_separated_devices[1].split(",")
if len(comma_separated_devices) < 2:
print("At least two devices required in Hetero/Multi/Auto Mode")
status_hetero = False
dev_options = ["CPU", "GPU", "VPUX"]
for dev in comma_separated_devices:
if dev not in dev_options:
status_hetero = False
break
def invalid_hetero_build():
print("\nIf trying to build Hetero/Multi/Auto, specifiy the supported devices along with it.\n")
print("specify the keyword HETERO or MULTI or AUTO followed by the devices ")
print("in the order of priority you want to build\n")
print("The different hardware devices that can be added in HETERO or MULTI or AUTO")
print("are ['CPU','GPU', 'VPUX'] \n")
print("An example of how to specify the hetero build type. Ex: HETERO:GPU,CPU \n")
print("An example of how to specify the MULTI build type. Ex: MULTI:GPU,CPU \n")
print("An example of how to specify the AUTO build type. Ex: AUTO:GPU,CPU \n")
sys.exit("Wrong Build Type selected")
if res is False:
print("\nYou have selcted wrong configuration for the build.")
print("pick the build type for specific Hardware Device from following options: ", choices)
print("(or) from the following options with graph partitioning disabled: ", choices1)
print("\n")
if not (device_read.startswith("HETERO") or device_read.startswith("MULTI") or device_read.startswith("AUTO")):
invalid_hetero_build()
sys.exit("Wrong Build Type selected")
if status_hetero is False:
invalid_hetero_build()
return device_read
def parse_arguments():
class Parser(argparse.ArgumentParser):
# override argument file line parsing behavior - allow multiple arguments per line and handle quotes
def convert_arg_line_to_args(self, arg_line):
return shlex.split(arg_line)
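            # e.g. a line such as the following in a file passed as "@build_args.txt" (hypothetical)
            #   --config RelWithDebInfo --build_shared_lib --cmake_extra_defines "FOO=bar baz"
            # is split into individual, quote-aware arguments instead of being treated as one token.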
parser = Parser(
description="ONNXRuntime CI build driver.",
usage="""
Default behavior is --update --build --test for native architecture builds.
Default behavior is --update --build for cross-compiled builds.
The Update phase will update git submodules, and run cmake to generate makefiles.
The Build phase will build all projects.
The Test phase will run all unit tests, and optionally the ONNX tests.
Use the individual flags to only run the specified stages.
""",
# files containing arguments can be specified on the command line with "@<filename>" and the arguments within
# will be included at that point
fromfile_prefix_chars="@",
)
# Main arguments
parser.add_argument("--build_dir", required=True, help="Path to the build directory.")
parser.add_argument(
"--config",
nargs="+",
default=["Debug"],
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration(s) to build.",
)
parser.add_argument("--update", action="store_true", help="Update makefiles.")
parser.add_argument("--build", action="store_true", help="Build.")
parser.add_argument(
"--clean", action="store_true", help="Run 'cmake --build --target clean' for the selected config/s."
)
parser.add_argument(
"--parallel",
nargs="?",
const="0",
default="1",
type=int,
help="Use parallel build. The optional value specifies the maximum number of parallel jobs. "
"If the optional value is 0 or unspecified, it is interpreted as the number of CPUs.",
)
parser.add_argument("--test", action="store_true", help="Run unit tests.")
parser.add_argument("--skip_tests", action="store_true", help="Skip all tests.")
parser.add_argument(
"--compile_no_warning_as_error",
action="store_true",
help="Preventing warnings from being treated as errors on compile.",
)
# Training options
parser.add_argument("--enable_nvtx_profile", action="store_true", help="Enable NVTX profile in ORT.")
parser.add_argument("--enable_memory_profile", action="store_true", help="Enable memory profile in ORT.")
parser.add_argument(
"--enable_training",
action="store_true",
help="Enable full training functionality in ORT. Includes ORTModule and ORT Training APIs.",
)
parser.add_argument("--enable_training_apis", action="store_true", help="Enable ort training apis.")
parser.add_argument("--enable_training_ops", action="store_true", help="Enable training ops in inference graph.")
parser.add_argument("--enable_nccl", action="store_true", help="Enable Nccl.")
parser.add_argument("--mpi_home", help="Path to MPI installation dir")
parser.add_argument("--nccl_home", help="Path to NCCL installation dir")
parser.add_argument(
"--use_mpi", nargs="?", default=False, const=True, type=_str_to_bool, help="Disabled by default."
)
# enable ONNX tests
parser.add_argument(
"--enable_onnx_tests",
action="store_true",
help="""When running the Test phase, run onnx_test_running against
available test data directories.""",
)
parser.add_argument("--path_to_protoc_exe", help="Path to protoc exe.")
parser.add_argument("--fuzz_testing", action="store_true", help="Enable Fuzz testing of the onnxruntime.")
parser.add_argument(
"--enable_symbolic_shape_infer_tests",
action="store_true",
help="""When running the Test phase, run symbolic shape inference against
available test data directories.""",
)
# generate documentation
parser.add_argument(
"--gen_doc",
nargs="?",
const="yes",
type=str,
help="Generate documentation listing standard ONNX operators and types implemented by "
"various execution providers and contrib operator schemas. Must be used for inference builds, only!"
"Use `--gen_doc validate` to validate these match the current contents in /docs.",
)
parser.add_argument("--gen-api-doc", action="store_true", help="Generate API documentation for PyTorch frontend")
# CUDA related
parser.add_argument("--use_cuda", action="store_true", help="Enable CUDA.")
parser.add_argument(
"--cuda_version", help="The version of CUDA toolkit to use. Auto-detect if not specified. e.g. 9.0"
)
parser.add_argument(
"--cuda_home",
help="Path to CUDA home."
"Read from CUDA_HOME environment variable if --use_cuda is true and "
"--cuda_home is not specified.",
)
parser.add_argument(
"--cudnn_home",
help="Path to CUDNN home. "
"Read from CUDNN_HOME environment variable if --use_cuda is true and "
"--cudnn_home is not specified.",
)
parser.add_argument("--enable_cuda_line_info", action="store_true", help="Enable CUDA line info.")
# Python bindings
parser.add_argument("--enable_pybind", action="store_true", help="Enable Python Bindings.")
parser.add_argument("--build_wheel", action="store_true", help="Build Python Wheel.")
parser.add_argument(
"--wheel_name_suffix",
help="Suffix to append to created wheel names. This value is currently only used for nightly builds.",
)
parser.add_argument(
"--numpy_version", help="Installs a specific version of numpy before building the python binding."
)
parser.add_argument("--skip-keras-test", action="store_true", help="Skip tests with Keras if keras is installed")
# C-Sharp bindings
parser.add_argument(
"--build_csharp",
action="store_true",
help="Build C#.Net DLL and NuGet package. This should be only used in CI pipelines. "
"For building C# bindings and packaging them into nuget package use --build_nuget arg.",
)
parser.add_argument(
"--build_nuget",
action="store_true",
help="Build C#.Net DLL and NuGet package on the local machine. "
"Currently only Windows and Linux platforms are supported.",
)
parser.add_argument(
"--msbuild_extra_options",
nargs="+",
action="append",
help="Extra properties to pass to msbuild during build. "
"These are just msbuild /p: options without the leading /p:.",
)
# Java bindings
parser.add_argument("--build_java", action="store_true", help="Build Java bindings.")
# Node.js binding
parser.add_argument("--build_nodejs", action="store_true", help="Build Node.js binding and NPM package.")
# Objective-C binding
parser.add_argument("--build_objc", action="store_true", help="Build Objective-C binding.")
# Build a shared lib
parser.add_argument("--build_shared_lib", action="store_true", help="Build a shared library for the ONNXRuntime.")
# Build a shared lib
parser.add_argument(
"--build_apple_framework", action="store_true", help="Build a macOS/iOS framework for the ONNXRuntime."
)
# Build options
parser.add_argument(
"--cmake_extra_defines",
nargs="+",
action="append",
help="Extra definitions to pass to CMake during build system "
"generation. These are just CMake -D options without the leading -D.",
)
parser.add_argument("--target", help="Build a specific target, e.g. winml_dll")
# This flag is needed when :
# 1. The OS is 64 bits Windows
# 2. And the target binary is for 32 bits Windows
# 3. And the python used for running this script is 64 bits.
# But if you can get a 32 bits python, the build will run better and you won't need this flag.
parser.add_argument(
"--x86",
action="store_true",
help="[cross-compiling] Create Windows x86 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed",
)
parser.add_argument(
"--arm",
action="store_true",
help="[cross-compiling] Create ARM makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed",
)
parser.add_argument(
"--arm64",
action="store_true",
help="[cross-compiling] Create ARM64 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed",
)
parser.add_argument(
"--arm64ec",
action="store_true",
help="[cross-compiling] Create ARM64EC makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed",
)
parser.add_argument("--msvc_toolset", help="MSVC toolset to use. e.g. 14.11")
parser.add_argument("--android", action="store_true", help="Build for Android")
parser.add_argument(
"--android_abi",
default="arm64-v8a",
choices=["armeabi-v7a", "arm64-v8a", "x86", "x86_64"],
help="Specify the target Android Application Binary Interface (ABI)",
)
parser.add_argument("--android_api", type=int, default=27, help="Android API Level, e.g. 21")
parser.add_argument(
"--android_sdk_path", type=str, default=os.environ.get("ANDROID_HOME", ""), help="Path to the Android SDK"
)
parser.add_argument(
"--android_ndk_path", type=str, default=os.environ.get("ANDROID_NDK_HOME", ""), help="Path to the Android NDK"
)
parser.add_argument(
"--android_cpp_shared",
action="store_true",
help="Build with shared libc++ instead of the default static libc++.",
)
parser.add_argument("--android_run_emulator", action="store_true", help="Start up an Android emulator if needed.")
parser.add_argument("--use_gdk", action="store_true", help="Build with the GDK toolchain.")
parser.add_argument(
"--gdk_edition",
default=os.path.normpath(os.environ.get("GameDKLatest", "")).split(os.sep)[-1], # noqa: SIM112
help="Build with a specific GDK edition. Defaults to the latest installed.",
)
parser.add_argument("--gdk_platform", default="Scarlett", help="Sets the GDK target platform.")
parser.add_argument("--ios", action="store_true", help="build for ios")
parser.add_argument(
"--ios_sysroot", default="", help="Specify the location name of the macOS platform SDK to be used"
)
parser.add_argument(
"--ios_toolchain_file",
default="",
help="Path to ios toolchain file, or cmake/onnxruntime_ios.toolchain.cmake will be used",
)
parser.add_argument(
"--xcode_code_signing_team_id", default="", help="The development team ID used for code signing in Xcode"
)
parser.add_argument(
"--xcode_code_signing_identity", default="", help="The development identity used for code signing in Xcode"
)
parser.add_argument(
"--use_xcode",
action="store_const",
const="Xcode",
dest="cmake_generator",
help="Use Xcode as cmake generator, this is only supported on MacOS. Equivalent to '--cmake_generator Xcode'.",
)
parser.add_argument(
"--osx_arch",
default="arm64" if platform.machine() == "arm64" else "x86_64",
choices=["arm64", "arm64e", "x86_64"],
help="Specify the Target specific architectures for macOS and iOS, This is only supported on MacOS",
)
parser.add_argument(
"--apple_deploy_target",
type=str,
help="Specify the minimum version of the target platform "
"(e.g. macOS or iOS)"
"This is only supported on MacOS",
)
parser.add_argument(
"--disable_memleak_checker", action="store_true", help="Disable memory leak checker from Windows build"
)
# WebAssembly build
parser.add_argument("--build_wasm", action="store_true", help="Build for WebAssembly")
parser.add_argument("--build_wasm_static_lib", action="store_true", help="Build for WebAssembly static library")
parser.add_argument("--emsdk_version", default="3.1.37", help="Specify version of emsdk")
parser.add_argument("--enable_wasm_simd", action="store_true", help="Enable WebAssembly SIMD")
parser.add_argument("--enable_wasm_threads", action="store_true", help="Enable WebAssembly multi-threads support")
parser.add_argument(
"--disable_wasm_exception_catching", action="store_true", help="Disable exception catching in WebAssembly."
)
parser.add_argument(
"--enable_wasm_api_exception_catching", action="store_true", help="Catch exceptions at top level api."
)
parser.add_argument(
"--enable_wasm_exception_throwing_override",
action="store_true",
help="Enable exception throwing in WebAssembly, this will override default disabling exception throwing "
"behavior when disable exceptions.",
)
parser.add_argument("--wasm_run_tests_in_browser", action="store_true", help="Run WebAssembly tests in browser")
parser.add_argument(
"--enable_wasm_profiling", action="store_true", help="Enable WebAsselby profiling and preserve function names"
)
parser.add_argument(
"--enable_wasm_debug_info", action="store_true", help="Build WebAssembly with DWARF format debug info"
)
parser.add_argument("--wasm_malloc", help="Specify memory allocator for WebAssembly")
parser.add_argument(
"--emscripten_settings",
nargs="+",
action="append",
help="Extra emscripten settings to pass to emcc using '-s <key>=<value>' during build.",
)
# Enable onnxruntime-extensions
parser.add_argument(
"--use_extensions",
action="store_true",
help="Enable custom operators in onnxruntime-extensions, use git submodule onnxruntime-extensions "
"in path cmake/external/onnxruntime-extensions by default.",
)
parser.add_argument(
"--extensions_overridden_path",
type=str,
help="Path to pre-pulled onnxruntime-extensions, will override default onnxruntime-extensions path.",
)
# Arguments needed by CI
parser.add_argument("--cmake_path", default="cmake", help="Path to the CMake program.")
parser.add_argument(
"--ctest_path",
default="ctest",
help="Path to the CTest program. It can be an empty string. If it is empty, "
"we will use this script driving the test programs directly.",
)
parser.add_argument(
"--skip_submodule_sync",
action="store_true",
help="Don't do a 'git submodule update'. Makes the Update phase faster.",
)
parser.add_argument("--use_mimalloc", action="store_true", help="Use mimalloc allocator")
parser.add_argument("--use_dnnl", action="store_true", help="Build with DNNL.")
parser.add_argument(
"--dnnl_gpu_runtime", action="store", default="", type=str.lower, help="e.g. --dnnl_gpu_runtime ocl"
)
parser.add_argument(
"--dnnl_opencl_root",
action="store",
default="",
help="Path to OpenCL SDK. "
'e.g. --dnnl_opencl_root "C:/Program Files (x86)/IntelSWTools/sw_dev_tools/OpenCL/sdk"',
)
parser.add_argument(
"--use_openvino",
nargs="?",
const="CPU_FP32",
type=_openvino_verify_device_type,
help="Build with OpenVINO for specific hardware.",
)
parser.add_argument("--use_coreml", action="store_true", help="Build with CoreML support.")
parser.add_argument("--use_webnn", action="store_true", help="Build with WebNN support.")
parser.add_argument("--use_snpe", action="store_true", help="Build with SNPE support.")
parser.add_argument("--snpe_root", help="Path to SNPE SDK root.")
parser.add_argument("--use_nnapi", action="store_true", help="Build with NNAPI support.")
parser.add_argument(
"--nnapi_min_api", type=int, help="Minimum Android API level to enable NNAPI, should be no less than 27"
)
parser.add_argument("--use_jsep", action="store_true", help="Build with JavaScript kernels.")
parser.add_argument("--use_qnn", action="store_true", help="Build with QNN support.")
parser.add_argument("--qnn_home", help="Path to QNN SDK dir.")
parser.add_argument("--use_rknpu", action="store_true", help="Build with RKNPU.")
parser.add_argument("--use_preinstalled_eigen", action="store_true", help="Use pre-installed Eigen.")
parser.add_argument("--eigen_path", help="Path to pre-installed Eigen.")
parser.add_argument("--enable_msinternal", action="store_true", help="Enable for Microsoft internal builds only.")
parser.add_argument("--llvm_path", help="Path to llvm dir")
parser.add_argument("--use_vitisai", action="store_true", help="Build with Vitis-AI")
parser.add_argument("--use_tvm", action="store_true", help="Build with TVM")
parser.add_argument("--tvm_cuda_runtime", action="store_true", default=False, help="Build TVM with CUDA support")
parser.add_argument(
"--use_tvm_hash", action="store_true", help="Build ipp-crypto for hash generation. It is used by TVM EP only"
)
parser.add_argument("--use_tensorrt", action="store_true", help="Build with TensorRT")
parser.add_argument(
"--use_tensorrt_builtin_parser", action="store_true", default=True, help="Use TensorRT builtin parser"
)
parser.add_argument("--use_tensorrt_oss_parser", action="store_true", help="Use TensorRT OSS parser")
parser.add_argument("--tensorrt_home", help="Path to TensorRT installation dir")
parser.add_argument("--test_all_timeout", default="10800", help="Set timeout for onnxruntime_test_all")
parser.add_argument("--use_migraphx", action="store_true", help="Build with MIGraphX")
parser.add_argument("--migraphx_home", help="Path to MIGraphX installation dir")
parser.add_argument("--use_full_protobuf", action="store_true", help="Use the full protobuf library")
parser.add_argument(
"--llvm_config",
type=str,
default="",
help="Path to llvm-config.exe for LLVM buit from sources. It is strongly needed for build on Windows",
)
parser.add_argument(
"--skip_onnx_tests",
action="store_true",
help="Explicitly disable all onnx related tests. Note: Use --skip_tests to skip all tests.",
)
parser.add_argument("--skip_winml_tests", action="store_true", help="Explicitly disable all WinML related tests")
parser.add_argument("--skip_nodejs_tests", action="store_true", help="Explicitly disable all Node.js binding tests")
parser.add_argument(
"--enable_msvc_static_runtime", action="store_true", help="Enable static linking of MSVC runtimes."
)
parser.add_argument(
"--enable_language_interop_ops",
action="store_true",
help="Enable operator implemented in language other than cpp",
)
parser.add_argument(
"--cmake_generator",
choices=[
"MinGW Makefiles",
"Ninja",
"NMake Makefiles",
"Unix Makefiles",
"Visual Studio 16 2019",
"Visual Studio 17 2022",
"Xcode",
],
default=None,
help="Specify the generator that CMake invokes.",
)
parser.add_argument(
"--enable_multi_device_test",
action="store_true",
help="Test with multi-device. Mostly used for multi-device GPU",
)
parser.add_argument("--use_dml", action="store_true", help="Build with DirectML.")
parser.add_argument(
"--dml_path",
type=str,
default="",
help="Path to a custom DirectML installation (must have bin/, lib/, and include/ subdirectories).",
)
parser.add_argument("--use_winml", action="store_true", help="Build with WinML.")
parser.add_argument(
"--winml_root_namespace_override", type=str, help="Specify the namespace that WinML builds into."
)
parser.add_argument(
"--dml_external_project", action="store_true", help="Build with DirectML as an external project."
)
parser.add_argument(
"--use_telemetry", action="store_true", help="Only official builds can set this flag to enable telemetry."
)
parser.add_argument("--enable_wcos", action="store_true", help="Build for Windows Core OS.")
parser.add_argument("--enable_lto", action="store_true", help="Enable Link Time Optimization")
parser.add_argument("--enable_transformers_tool_test", action="store_true", help="Enable transformers tool test")
parser.add_argument(
"--use_acl",
nargs="?",
const="ACL_1905",
choices=["ACL_1902", "ACL_1905", "ACL_1908", "ACL_2002"],
help="Build with ACL for ARM architectures.",
)
parser.add_argument("--acl_home", help="Path to ACL home dir")
parser.add_argument("--acl_libs", help="Path to ACL libraries")
parser.add_argument("--use_armnn", action="store_true", help="Enable ArmNN Execution Provider.")
parser.add_argument(
"--armnn_relu", action="store_true", help="Use the Relu operator implementation from the ArmNN EP."
)
parser.add_argument(
"--armnn_bn", action="store_true", help="Use the Batch Normalization operator implementation from the ArmNN EP."
)
parser.add_argument("--armnn_home", help="Path to ArmNN home dir")
parser.add_argument("--armnn_libs", help="Path to ArmNN libraries")
parser.add_argument("--build_micro_benchmarks", action="store_true", help="Build ONNXRuntime micro-benchmarks.")
# options to reduce binary size
parser.add_argument(
"--minimal_build",
default=None,
nargs="*",
type=str.lower,
help="Create a build that only supports ORT format models. "
"See https://onnxruntime.ai/docs/tutorials/mobile/ for more information. "
"RTTI is automatically disabled in a minimal build. "
"To enable execution providers that compile kernels at runtime (e.g. NNAPI) pass 'extended' "
"as a parameter. e.g. '--minimal_build extended'. "
"To enable support for custom operators pass 'custom_ops' as a parameter. "
"e.g. '--minimal_build custom_ops'. This can be combined with an 'extended' build by passing "
"'--minimal_build extended custom_ops'",
)
parser.add_argument(
"--include_ops_by_config",
type=str,
help="Include ops from config file. See /docs/Reduced_Operator_Kernel_build.md for more information.",
)
parser.add_argument(
"--enable_reduced_operator_type_support",
action="store_true",
help="If --include_ops_by_config is specified, and the configuration file has type reduction "
"information, limit the types individual operators support where possible to further "
"reduce the build size. "
"See /docs/Reduced_Operator_Kernel_build.md for more information.",
)
parser.add_argument("--disable_contrib_ops", action="store_true", help="Disable contrib ops (reduces binary size)")
parser.add_argument(
"--disable_ml_ops", action="store_true", help="Disable traditional ML ops (reduces binary size)"
)
    # Note: in our CMakeLists.txt this option defaults to ON, but in this script we reverse it so the default is OFF.
parser.add_argument("--disable_rtti", action="store_true", help="Disable RTTI (reduces binary size)")
parser.add_argument(
"--disable_exceptions",
action="store_true",
help="Disable exceptions to reduce binary size. Requires --minimal_build.",
)
parser.add_argument("--rocm_version", help="The version of ROCM stack to use. ")
parser.add_argument("--use_rocm", action="store_true", help="Build with ROCm")
parser.add_argument("--rocm_home", help="Path to ROCm installation dir")
# Code coverage
parser.add_argument(
"--code_coverage", action="store_true", help="Generate code coverage when targetting Android (only)."
)
# lazy tensor support.
parser.add_argument(
"--enable_lazy_tensor", action="store_true", help="Enable use ORT as backend in Pytorch LazyTensor."
)
parser.add_argument("--ms_experimental", action="store_true", help="Build microsoft experimental operators.")
parser.add_argument(
"--enable_external_custom_op_schemas",
action="store_true",
help="Enable registering user defined custom operation schemas at shared library load time.\
This feature is only supported/available on Ubuntu.",
)
parser.add_argument(
"--external_graph_transformer_path", type=str, help="path to the external graph transformer dir."
)
parser.add_argument(
"--enable_cuda_profiling",
action="store_true",
help="enable cuda kernel profiling, \
cupti library must be added to PATH beforehand.",
)
parser.add_argument("--use_cann", action="store_true", help="Build with CANN")
parser.add_argument("--cann_home", help="Path to CANN installation dir")
parser.add_argument(
"--enable_rocm_profiling",
action="store_true",
help="enable rocm kernel profiling.",
)
parser.add_argument("--use_xnnpack", action="store_true", help="Enable xnnpack EP.")
parser.add_argument("--use_azure", action="store_true", help="Enable azure EP.")
parser.add_argument("--use_cache", action="store_true", help="Use compiler cache in CI")
parser.add_argument("--use_triton_kernel", action="store_true", help="Use triton compiled kernels")
parser.add_argument("--use_lock_free_queue", action="store_true", help="Use lock-free task queue for threadpool.")
if not is_windows():
parser.add_argument(
"--allow_running_as_root",
action="store_true",
help="Allow build to be run as root user. This is not allowed by default.",
)
args = parser.parse_args()
if args.android_sdk_path:
args.android_sdk_path = os.path.normpath(args.android_sdk_path)
if args.android_ndk_path:
args.android_ndk_path = os.path.normpath(args.android_ndk_path)
if args.enable_wasm_api_exception_catching:
# if we catch on api level, we don't want to catch all
args.disable_wasm_exception_catching = True
if not args.disable_wasm_exception_catching or args.enable_wasm_api_exception_catching:
# doesn't make sense to catch if no one throws
args.enable_wasm_exception_throwing_override = True
if args.cmake_generator is None and is_windows():
args.cmake_generator = "Ninja" if args.build_wasm else "Visual Studio 17 2022"
return args
def is_reduced_ops_build(args):
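    """Return True when the build only includes the operator kernels listed via --include_ops_by_config."""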
return args.include_ops_by_config is not None
def resolve_executable_path(command_or_path):
"""Returns the absolute path of an executable."""
if command_or_path and command_or_path.strip():
executable_path = shutil.which(command_or_path)
if executable_path is None:
raise BuildError(f"Failed to resolve executable path for '{command_or_path}'.")
return os.path.abspath(executable_path)
else:
return None
def get_linux_distro():
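    """Parse /etc/os-release and return (distro name, version); returns empty strings if it cannot be read."""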
try:
with open("/etc/os-release") as f:
dist_info = dict(line.strip().split("=", 1) for line in f.readlines())
return dist_info.get("NAME", "").strip('"'), dist_info.get("VERSION", "").strip('"')
except (OSError, ValueError):
return "", ""
def is_ubuntu_1604():
dist, ver = get_linux_distro()
return dist == "Ubuntu" and ver.startswith("16.04")
def get_config_build_dir(build_dir, config):
# build directory per configuration
return os.path.join(build_dir, config)
def run_subprocess(
args,
cwd=None,
capture_stdout=False,
dll_path=None,
shell=False,
env=None,
python_path=None,
cuda_home=None,
):
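    """Run a command with an environment derived from os.environ.

    dll_path is added to PATH (Windows) or LD_LIBRARY_PATH (other platforms), cuda_home/bin is
    prepended to PATH, python_path is appended to PYTHONPATH, and entries in env override the rest.
    """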
if env is None:
env = {}
if isinstance(args, str):
raise ValueError("args should be a sequence of strings, not a string")
my_env = os.environ.copy()
if dll_path:
if is_windows():
if "PATH" in my_env:
my_env["PATH"] = dll_path + os.pathsep + my_env["PATH"]
else:
my_env["PATH"] = dll_path
else:
if "LD_LIBRARY_PATH" in my_env:
my_env["LD_LIBRARY_PATH"] += os.pathsep + dll_path
else:
my_env["LD_LIBRARY_PATH"] = dll_path
# Add nvcc's folder to PATH env so that our cmake file can find nvcc
if cuda_home:
my_env["PATH"] = os.path.join(cuda_home, "bin") + os.pathsep + my_env["PATH"]
if python_path:
if "PYTHONPATH" in my_env:
my_env["PYTHONPATH"] += os.pathsep + python_path
else:
my_env["PYTHONPATH"] = python_path
my_env.update(env)
return run(*args, cwd=cwd, capture_stdout=capture_stdout, shell=shell, env=my_env)
def update_submodules(source_dir):
run_subprocess(["git", "submodule", "sync", "--recursive"], cwd=source_dir)
run_subprocess(["git", "submodule", "update", "--init", "--recursive"], cwd=source_dir)
def is_docker():
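    """Best-effort detection of running inside a Docker container (checks /.dockerenv and /proc/self/cgroup)."""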
path = "/proc/self/cgroup"
return (
os.path.exists("/.dockerenv")
or os.path.isfile(path)
and any("docker" in line for line in open(path)) # noqa: SIM115
)
def install_python_deps(numpy_version=""):
dep_packages = ["setuptools", "wheel", "pytest"]
dep_packages.append(f"numpy=={numpy_version}" if numpy_version else "numpy>=1.16.6")
dep_packages.append("sympy>=1.10")
dep_packages.append("packaging")
dep_packages.append("cerberus")
run_subprocess([sys.executable, "-m", "pip", "install", *dep_packages])
def setup_test_data(source_onnx_model_dir, dest_model_dir_name, build_dir, configs):
    # Create the symlink/shortcut of the ONNX models dir under build_dir.
    # Currently there are two sources of ONNX models: one is built into the OS image, and the other is
    # {source_dir}/js/test, which is downloaded from the ONNX web site.
if is_windows():
src_model_dir = os.path.join(build_dir, dest_model_dir_name)
if os.path.exists(source_onnx_model_dir) and not os.path.exists(src_model_dir):
log.debug(f"creating shortcut {source_onnx_model_dir} -> {src_model_dir}")
run_subprocess(["mklink", "/D", "/J", src_model_dir, source_onnx_model_dir], shell=True)
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
dest_model_dir = os.path.join(config_build_dir, dest_model_dir_name)
if os.path.exists(source_onnx_model_dir) and not os.path.exists(dest_model_dir):
log.debug(f"creating shortcut {source_onnx_model_dir} -> {dest_model_dir}")
run_subprocess(["mklink", "/D", "/J", dest_model_dir, source_onnx_model_dir], shell=True)
elif os.path.exists(src_model_dir) and not os.path.exists(dest_model_dir):
log.debug(f"creating shortcut {src_model_dir} -> {dest_model_dir}")
run_subprocess(["mklink", "/D", "/J", dest_model_dir, src_model_dir], shell=True)
else:
src_model_dir = os.path.join(build_dir, dest_model_dir_name)
if os.path.exists(source_onnx_model_dir) and not os.path.exists(src_model_dir):
log.debug(f"create symlink {source_onnx_model_dir} -> {src_model_dir}")
os.symlink(source_onnx_model_dir, src_model_dir, target_is_directory=True)
def use_dev_mode(args):
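    """Return True for developer-mode builds; False when --compile_no_warning_as_error is set, for ACL/ArmNN
    or iOS builds, or when running in an Azure DevOps collection other than the onnxruntime one."""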
if args.compile_no_warning_as_error:
return False
if args.use_acl:
return False
if args.use_armnn:
return False
if args.ios and is_macOS():
return False
SYSTEM_COLLECTIONURI = os.getenv("SYSTEM_COLLECTIONURI") # noqa: N806
if SYSTEM_COLLECTIONURI and SYSTEM_COLLECTIONURI != "https://dev.azure.com/onnxruntime/":
return False
return True
def add_default_definition(definition_list, key, default_value):
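    """Append key=default_value to definition_list unless a definition for key is already present."""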
for x in definition_list:
if x.startswith(key + "="):
return definition_list
definition_list.append(key + "=" + default_value)
def normalize_arg_list(nested_list):
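    """Flatten a list of lists (as produced by argparse 'append' actions) into a single list; [] for None."""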
return [i for j in nested_list for i in j] if nested_list else []
def generate_build_tree(
cmake_path,
source_dir,
build_dir,
cuda_home,
cudnn_home,
rocm_home,
mpi_home,
nccl_home,
tensorrt_home,
migraphx_home,
acl_home,
acl_libs,
armnn_home,
armnn_libs,
qnn_home,
snpe_root,
cann_home,
path_to_protoc_exe,
configs,
cmake_extra_defines,
args,
cmake_extra_args,
):
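    """Run CMake for each configuration to generate the build tree, translating the parsed build.py
    arguments into CMake cache definitions (-D...) and toolchain settings."""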
log.info("Generating CMake build tree")
cmake_dir = os.path.join(source_dir, "cmake")
cmake_args = [cmake_path, cmake_dir]
if not use_dev_mode(args):
cmake_args += ["--compile-no-warning-as-error"]
# enable/disable float 8 types
disable_float8_types = args.use_rocm or args.android or args.minimal_build
cmake_args += [
"-Donnxruntime_RUN_ONNX_TESTS=" + ("ON" if args.enable_onnx_tests else "OFF"),
"-Donnxruntime_GENERATE_TEST_REPORTS=ON",
# There are two ways of locating python C API header file. "find_package(PythonLibs 3.5 REQUIRED)"
# and "find_package(Python 3.5 COMPONENTS Development.Module)". The first one is deprecated and it
# depends on the "PYTHON_EXECUTABLE" variable. The second needs "Python_EXECUTABLE". Here we set both
# of them to get the best compatibility.
"-DPython_EXECUTABLE=" + sys.executable,
"-DPYTHON_EXECUTABLE=" + sys.executable,
"-Donnxruntime_USE_MIMALLOC=" + ("ON" if args.use_mimalloc else "OFF"),
"-Donnxruntime_ENABLE_PYTHON=" + ("ON" if args.enable_pybind else "OFF"),
"-Donnxruntime_BUILD_CSHARP=" + ("ON" if args.build_csharp else "OFF"),
"-Donnxruntime_BUILD_JAVA=" + ("ON" if args.build_java else "OFF"),
"-Donnxruntime_BUILD_NODEJS=" + ("ON" if args.build_nodejs else "OFF"),
"-Donnxruntime_BUILD_OBJC=" + ("ON" if args.build_objc else "OFF"),
"-Donnxruntime_BUILD_SHARED_LIB=" + ("ON" if args.build_shared_lib else "OFF"),
"-Donnxruntime_BUILD_APPLE_FRAMEWORK=" + ("ON" if args.build_apple_framework else "OFF"),
"-Donnxruntime_USE_DNNL=" + ("ON" if args.use_dnnl else "OFF"),
"-Donnxruntime_USE_NNAPI_BUILTIN=" + ("ON" if args.use_nnapi else "OFF"),
"-Donnxruntime_USE_RKNPU=" + ("ON" if args.use_rknpu else "OFF"),
"-Donnxruntime_USE_LLVM=" + ("ON" if args.use_tvm else "OFF"),
"-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=" + ("ON" if args.enable_msinternal else "OFF"),
"-Donnxruntime_USE_VITISAI=" + ("ON" if args.use_vitisai else "OFF"),
"-Donnxruntime_USE_TENSORRT=" + ("ON" if args.use_tensorrt else "OFF"),
"-Donnxruntime_USE_TENSORRT_BUILTIN_PARSER="
+ ("ON" if args.use_tensorrt_builtin_parser and not args.use_tensorrt_oss_parser else "OFF"),
# set vars for TVM
"-Donnxruntime_USE_TVM=" + ("ON" if args.use_tvm else "OFF"),
"-Donnxruntime_TVM_CUDA_RUNTIME=" + ("ON" if args.use_tvm and args.tvm_cuda_runtime else "OFF"),
"-Donnxruntime_TVM_USE_HASH=" + ("ON" if args.use_tvm_hash else "OFF"),
# set vars for migraphx
"-Donnxruntime_USE_MIGRAPHX=" + ("ON" if args.use_migraphx else "OFF"),
"-Donnxruntime_DISABLE_CONTRIB_OPS=" + ("ON" if args.disable_contrib_ops else "OFF"),
"-Donnxruntime_DISABLE_ML_OPS=" + ("ON" if args.disable_ml_ops else "OFF"),
"-Donnxruntime_DISABLE_RTTI="
+ ("ON" if args.disable_rtti or (args.minimal_build is not None and not args.enable_pybind) else "OFF"),
"-Donnxruntime_DISABLE_EXCEPTIONS=" + ("ON" if args.disable_exceptions else "OFF"),
# Need to use 'is not None' with minimal_build check as it could be an empty list.
"-Donnxruntime_MINIMAL_BUILD=" + ("ON" if args.minimal_build is not None else "OFF"),
"-Donnxruntime_EXTENDED_MINIMAL_BUILD="
+ ("ON" if args.minimal_build and "extended" in args.minimal_build else "OFF"),
"-Donnxruntime_MINIMAL_BUILD_CUSTOM_OPS="
+ (
"ON"
if (args.minimal_build is not None and ("custom_ops" in args.minimal_build or args.use_extensions))
else "OFF"
),
"-Donnxruntime_REDUCED_OPS_BUILD=" + ("ON" if is_reduced_ops_build(args) else "OFF"),
"-Donnxruntime_ENABLE_LANGUAGE_INTEROP_OPS=" + ("ON" if args.enable_language_interop_ops else "OFF"),
"-Donnxruntime_USE_DML=" + ("ON" if args.use_dml else "OFF"),
"-Donnxruntime_USE_WINML=" + ("ON" if args.use_winml else "OFF"),
"-Donnxruntime_BUILD_MS_EXPERIMENTAL_OPS=" + ("ON" if args.ms_experimental else "OFF"),
"-Donnxruntime_USE_TELEMETRY=" + ("ON" if args.use_telemetry else "OFF"),
"-Donnxruntime_ENABLE_LTO=" + ("ON" if args.enable_lto else "OFF"),
"-Donnxruntime_USE_ACL=" + ("ON" if args.use_acl else "OFF"),
"-Donnxruntime_USE_ACL_1902=" + ("ON" if args.use_acl == "ACL_1902" else "OFF"),
"-Donnxruntime_USE_ACL_1905=" + ("ON" if args.use_acl == "ACL_1905" else "OFF"),
"-Donnxruntime_USE_ACL_1908=" + ("ON" if args.use_acl == "ACL_1908" else "OFF"),
"-Donnxruntime_USE_ACL_2002=" + ("ON" if args.use_acl == "ACL_2002" else "OFF"),
"-Donnxruntime_USE_ARMNN=" + ("ON" if args.use_armnn else "OFF"),
"-Donnxruntime_ARMNN_RELU_USE_CPU=" + ("OFF" if args.armnn_relu else "ON"),
"-Donnxruntime_ARMNN_BN_USE_CPU=" + ("OFF" if args.armnn_bn else "ON"),
"-Donnxruntime_USE_JSEP=" + ("ON" if args.use_jsep else "OFF"),
# Training related flags
"-Donnxruntime_ENABLE_NVTX_PROFILE=" + ("ON" if args.enable_nvtx_profile else "OFF"),
"-Donnxruntime_ENABLE_TRAINING=" + ("ON" if args.enable_training else "OFF"),
"-Donnxruntime_ENABLE_TRAINING_OPS=" + ("ON" if args.enable_training_ops else "OFF"),
"-Donnxruntime_ENABLE_TRAINING_APIS=" + ("ON" if args.enable_training_apis else "OFF"),
        # Enable advanced computations such as AVX for some training-related ops.
"-Donnxruntime_ENABLE_CPU_FP16_OPS=" + ("ON" if args.enable_training else "OFF"),
"-Donnxruntime_USE_NCCL=" + ("ON" if args.enable_nccl else "OFF"),
"-Donnxruntime_BUILD_BENCHMARKS=" + ("ON" if args.build_micro_benchmarks else "OFF"),
"-Donnxruntime_USE_ROCM=" + ("ON" if args.use_rocm else "OFF"),
"-DOnnxruntime_GCOV_COVERAGE=" + ("ON" if args.code_coverage else "OFF"),
"-Donnxruntime_USE_MPI=" + ("ON" if args.use_mpi else "OFF"),
"-Donnxruntime_ENABLE_MEMORY_PROFILE=" + ("ON" if args.enable_memory_profile else "OFF"),
"-Donnxruntime_ENABLE_CUDA_LINE_NUMBER_INFO=" + ("ON" if args.enable_cuda_line_info else "OFF"),
"-Donnxruntime_BUILD_WEBASSEMBLY_STATIC_LIB=" + ("ON" if args.build_wasm_static_lib else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_EXCEPTION_CATCHING="
+ ("OFF" if args.disable_wasm_exception_catching else "ON"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_API_EXCEPTION_CATCHING="
+ ("ON" if args.enable_wasm_api_exception_catching else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_EXCEPTION_THROWING="
+ ("ON" if args.enable_wasm_exception_throwing_override else "OFF"),
"-Donnxruntime_WEBASSEMBLY_RUN_TESTS_IN_BROWSER=" + ("ON" if args.wasm_run_tests_in_browser else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_THREADS=" + ("ON" if args.enable_wasm_threads else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_DEBUG_INFO=" + ("ON" if args.enable_wasm_debug_info else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_PROFILING=" + ("ON" if args.enable_wasm_profiling else "OFF"),
"-Donnxruntime_ENABLE_LAZY_TENSOR=" + ("ON" if args.enable_lazy_tensor else "OFF"),
"-Donnxruntime_ENABLE_EXTERNAL_CUSTOM_OP_SCHEMAS="
+ ("ON" if args.enable_external_custom_op_schemas else "OFF"),
"-Donnxruntime_ENABLE_CUDA_PROFILING=" + ("ON" if args.enable_cuda_profiling else "OFF"),
"-Donnxruntime_ENABLE_ROCM_PROFILING=" + ("ON" if args.enable_rocm_profiling else "OFF"),
"-Donnxruntime_USE_XNNPACK=" + ("ON" if args.use_xnnpack else "OFF"),
"-Donnxruntime_USE_WEBNN=" + ("ON" if args.use_webnn else "OFF"),
"-Donnxruntime_USE_CANN=" + ("ON" if args.use_cann else "OFF"),
"-Donnxruntime_USE_TRITON_KERNEL=" + ("ON" if args.use_triton_kernel else "OFF"),
"-Donnxruntime_DISABLE_FLOAT8_TYPES=" + ("ON" if disable_float8_types else "OFF"),
]
# By default on Windows we currently support only cross compiling for ARM/ARM64
# (no native compilation supported through this script).
if args.arm64 or args.arm64ec or args.arm:
add_default_definition(cmake_extra_defines, "onnxruntime_CROSS_COMPILING", "ON")
if args.use_extensions:
add_default_definition(cmake_extra_defines, "OPENCV_SKIP_SYSTEM_PROCESSOR_DETECTION", "ON")
if args.use_cache:
cmake_args.append("-Donnxruntime_BUILD_CACHE=ON")
if not (is_windows() and args.cmake_generator != "Ninja"):
cmake_args.append("-DCMAKE_CXX_COMPILER_LAUNCHER=ccache")
cmake_args.append("-DCMAKE_C_COMPILER_LAUNCHER=ccache")
if args.use_cuda:
cmake_args.append("-DCMAKE_CUDA_COMPILER_LAUNCHER=ccache")
if args.use_rocm:
cmake_args.append("-DCMAKE_HIP_COMPILER_LAUNCHER=ccache")
# By default cmake does not check TLS/SSL certificates. Here we turn it on.
# But, in some cases you may also need to supply a CA file.
add_default_definition(cmake_extra_defines, "CMAKE_TLS_VERIFY", "ON")
add_default_definition(cmake_extra_defines, "FETCHCONTENT_QUIET", "OFF")
if args.external_graph_transformer_path:
cmake_args.append("-Donnxruntime_EXTERNAL_TRANSFORMER_SRC_PATH=" + args.external_graph_transformer_path)
if args.use_winml:
cmake_args.append("-Donnxruntime_BUILD_WINML_TESTS=" + ("OFF" if args.skip_winml_tests else "ON"))
if args.use_dnnl:
cmake_args.append("-Donnxruntime_DNNL_GPU_RUNTIME=" + args.dnnl_gpu_runtime)
cmake_args.append("-Donnxruntime_DNNL_OPENCL_ROOT=" + args.dnnl_opencl_root)
if args.build_wasm:
cmake_args.append("-Donnxruntime_ENABLE_WEBASSEMBLY_SIMD=" + ("ON" if args.enable_wasm_simd else "OFF"))
if args.use_migraphx:
cmake_args.append("-Donnxruntime_MIGRAPHX_HOME=" + migraphx_home)
if args.use_cuda:
cmake_args.append("-Donnxruntime_NVCC_THREADS=" + str(args.parallel))
if args.use_rocm:
cmake_args.append("-Donnxruntime_ROCM_HOME=" + rocm_home)
cmake_args.append("-Donnxruntime_ROCM_VERSION=" + args.rocm_version)
if args.use_tensorrt:
cmake_args.append("-Donnxruntime_TENSORRT_HOME=" + tensorrt_home)
if args.llvm_config:
cmake_args.append("-Donnxruntime_TVM_USE_LLVM=" + args.llvm_config)
if args.use_cuda:
add_default_definition(cmake_extra_defines, "onnxruntime_USE_CUDA", "ON")
if args.cuda_version:
add_default_definition(cmake_extra_defines, "onnxruntime_CUDA_VERSION", args.cuda_version)
# TODO: this variable is not really needed
add_default_definition(cmake_extra_defines, "onnxruntime_CUDA_HOME", cuda_home)
if cudnn_home:
add_default_definition(cmake_extra_defines, "onnxruntime_CUDNN_HOME", cudnn_home)
if is_windows():
if args.enable_msvc_static_runtime:
add_default_definition(
cmake_extra_defines, "CMAKE_MSVC_RUNTIME_LIBRARY", "MultiThreaded$<$<CONFIG:Debug>:Debug>"
)
add_default_definition(cmake_extra_defines, "ONNX_USE_MSVC_STATIC_RUNTIME", "ON")
add_default_definition(cmake_extra_defines, "protobuf_MSVC_STATIC_RUNTIME", "ON")
add_default_definition(cmake_extra_defines, "gtest_force_shared_crt", "OFF")
else:
# CMAKE_MSVC_RUNTIME_LIBRARY is default to MultiThreaded$<$<CONFIG:Debug>:Debug>DLL
add_default_definition(cmake_extra_defines, "ONNX_USE_MSVC_STATIC_RUNTIME", "OFF")
add_default_definition(cmake_extra_defines, "protobuf_MSVC_STATIC_RUNTIME", "OFF")
add_default_definition(cmake_extra_defines, "gtest_force_shared_crt", "ON")
if acl_home and os.path.exists(acl_home):
cmake_args += ["-Donnxruntime_ACL_HOME=" + acl_home]
if acl_libs and os.path.exists(acl_libs):
cmake_args += ["-Donnxruntime_ACL_LIBS=" + acl_libs]
if armnn_home and os.path.exists(armnn_home):
cmake_args += ["-Donnxruntime_ARMNN_HOME=" + armnn_home]
if armnn_libs and os.path.exists(armnn_libs):
cmake_args += ["-Donnxruntime_ARMNN_LIBS=" + armnn_libs]
if mpi_home and os.path.exists(mpi_home):
if args.use_mpi:
cmake_args += ["-Donnxruntime_MPI_HOME=" + mpi_home]
else:
log.warning(
"mpi_home is supplied but use_mpi is set to false."
" Build will continue without linking MPI libraries."
)
if nccl_home and os.path.exists(nccl_home):
cmake_args += ["-Donnxruntime_NCCL_HOME=" + nccl_home]
if qnn_home and os.path.exists(qnn_home):
cmake_args += ["-Donnxruntime_QNN_HOME=" + qnn_home]
if snpe_root and os.path.exists(snpe_root):
cmake_args += ["-DSNPE_ROOT=" + snpe_root]
if cann_home and os.path.exists(cann_home):
cmake_args += ["-Donnxruntime_CANN_HOME=" + cann_home]
if args.winml_root_namespace_override:
cmake_args += ["-Donnxruntime_WINML_NAMESPACE_OVERRIDE=" + args.winml_root_namespace_override]
if args.use_openvino:
cmake_args += [
"-Donnxruntime_USE_OPENVINO=ON",
"-Donnxruntime_USE_OPENVINO_GPU_FP32=" + ("ON" if args.use_openvino == "GPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16=" + ("ON" if args.use_openvino == "GPU_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32=" + ("ON" if args.use_openvino == "CPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP16=" + ("ON" if args.use_openvino == "CPU_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VPUX_FP16=" + ("ON" if args.use_openvino == "VPUX_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VPUX_U8=" + ("ON" if args.use_openvino == "VPUX_U8" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP32_NP="
+ ("ON" if args.use_openvino == "GPU_FP32_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16_NP="
+ ("ON" if args.use_openvino == "GPU_FP16_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32_NP="
+ ("ON" if args.use_openvino == "CPU_FP32_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP16_NP="
+ ("ON" if args.use_openvino == "CPU_FP16_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VPUX_FP16_NP="
+ ("ON" if args.use_openvino == "VPUX_FP16_NP_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VPUX_U8_NP=" + ("ON" if args.use_openvino == "VPUX_U8_NP_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_HETERO=" + ("ON" if args.use_openvino.startswith("HETERO") else "OFF"),
"-Donnxruntime_USE_OPENVINO_DEVICE=" + (args.use_openvino),
"-Donnxruntime_USE_OPENVINO_MULTI=" + ("ON" if args.use_openvino.startswith("MULTI") else "OFF"),
"-Donnxruntime_USE_OPENVINO_AUTO=" + ("ON" if args.use_openvino.startswith("AUTO") else "OFF"),
]
# TensorRT and OpenVINO providers currently only support
# full_protobuf option.
if args.use_full_protobuf or args.use_tensorrt or args.use_openvino or args.use_vitisai or args.gen_doc:
cmake_args += ["-Donnxruntime_USE_FULL_PROTOBUF=ON", "-DProtobuf_USE_STATIC_LIBS=ON"]
if args.use_tvm and args.llvm_path is not None:
cmake_args += ["-DLLVM_DIR=%s" % args.llvm_path]
if args.use_cuda and not is_windows():
nvml_stub_path = cuda_home + "/lib64/stubs"
cmake_args += ["-DCUDA_CUDA_LIBRARY=" + nvml_stub_path]
if args.use_preinstalled_eigen:
cmake_args += ["-Donnxruntime_USE_PREINSTALLED_EIGEN=ON", "-Deigen_SOURCE_PATH=" + args.eigen_path]
if args.nnapi_min_api:
cmake_args += ["-Donnxruntime_NNAPI_MIN_API=" + str(args.nnapi_min_api)]
if args.android:
if not args.android_ndk_path:
raise BuildError("android_ndk_path required to build for Android")
if not args.android_sdk_path:
raise BuildError("android_sdk_path required to build for Android")
cmake_args += [
"-DCMAKE_TOOLCHAIN_FILE="
+ os.path.join(args.android_ndk_path, "build", "cmake", "android.toolchain.cmake"),
"-DANDROID_PLATFORM=android-" + str(args.android_api),
"-DANDROID_ABI=" + str(args.android_abi),
"-DANDROID_MIN_SDK=" + str(args.android_api),
]
if args.android_cpp_shared:
cmake_args += ["-DANDROID_STL=c++_shared"]
if args.dml_path:
cmake_args += [
"-Donnxruntime_USE_CUSTOM_DIRECTML=ON",
"-Ddml_INCLUDE_DIR=" + os.path.join(args.dml_path, "include"),
"-Ddml_LIB_DIR=" + os.path.join(args.dml_path, "lib"),
]
if args.dml_external_project:
cmake_args += [
"-Donnxruntime_USE_CUSTOM_DIRECTML=ON",
"-Ddml_EXTERNAL_PROJECT=ON",
]
if args.use_gdk:
cmake_args += [
"-DCMAKE_TOOLCHAIN_FILE=" + os.path.join(source_dir, "cmake", "gdk_toolchain.cmake"),
"-DGDK_EDITION=" + args.gdk_edition,
"-DGDK_PLATFORM=" + args.gdk_platform,
"-Donnxruntime_BUILD_UNIT_TESTS=OFF", # gtest doesn't build for GDK
]
if args.use_dml and not (args.dml_path or args.dml_external_project):
raise BuildError("You must set dml_path or dml_external_project when building with the GDK.")
if is_macOS() and not args.android:
cmake_args += ["-DCMAKE_OSX_ARCHITECTURES=" + args.osx_arch]
if args.apple_deploy_target:
cmake_args += ["-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target]
# Code sign the binaries, if the code signing development identity and/or team id are provided
if args.xcode_code_signing_identity:
cmake_args += ["-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY=" + args.xcode_code_signing_identity]
if args.xcode_code_signing_team_id:
cmake_args += ["-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=" + args.xcode_code_signing_team_id]
if args.use_qnn:
        if args.qnn_home is None or not os.path.exists(args.qnn_home):
            raise BuildError(f"qnn_home='{qnn_home}' is not valid. The qnn_home path must be specified and valid.")
cmake_args += ["-Donnxruntime_USE_QNN=ON"]
if args.use_coreml:
cmake_args += ["-Donnxruntime_USE_COREML=ON"]
if args.use_webnn:
if not args.build_wasm:
raise BuildError("WebNN is only available for WebAssembly build.")
if args.disable_rtti:
# Avoid unboundTypeError for WebNN EP since unbound type names are illegal with RTTI disabled
# in Embind API, relevant issue: https://github.com/emscripten-core/emscripten/issues/16911
raise BuildError("WebNN is not supported with RTTI disabled.")
cmake_args += ["-Donnxruntime_USE_WEBNN=ON"]
if args.use_snpe:
cmake_args += ["-Donnxruntime_USE_SNPE=ON"]
if args.ios:
if not args.cmake_generator == "Xcode":
raise BuildError("iOS build requires use of the Xcode CMake generator ('--cmake_generator Xcode').")
needed_args = [
args.ios_sysroot,
args.apple_deploy_target,
]
arg_names = [
"--ios_sysroot " + "<the location or name of the macOS platform SDK>", # noqa: ISC003
"--apple_deploy_target " + "<the minimum version of the target platform>", # noqa: ISC003
]
if not all(needed_args):
raise BuildError(
"iOS build on MacOS canceled due to missing arguments: "
+ ", ".join(val for val, cond in zip(arg_names, needed_args) if not cond)
)
cmake_args += [
"-DCMAKE_SYSTEM_NAME=iOS",
"-Donnxruntime_BUILD_SHARED_LIB=ON",
"-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
"-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target,
# we do not need protoc binary for ios cross build
"-Dprotobuf_BUILD_PROTOC_BINARIES=OFF",
"-DCMAKE_TOOLCHAIN_FILE="
+ (args.ios_toolchain_file if args.ios_toolchain_file else "../cmake/onnxruntime_ios.toolchain.cmake"),
]
if args.build_wasm:
emsdk_dir = os.path.join(cmake_dir, "external", "emsdk")
emscripten_cmake_toolchain_file = os.path.join(
emsdk_dir, "upstream", "emscripten", "cmake", "Modules", "Platform", "Emscripten.cmake"
)
cmake_args += ["-DCMAKE_TOOLCHAIN_FILE=" + emscripten_cmake_toolchain_file]
if args.disable_wasm_exception_catching:
            # The WebAssembly unit tests require exception catching to work. If this feature is disabled,
            # we do not build the unit tests.
cmake_args += [
"-Donnxruntime_BUILD_UNIT_TESTS=OFF",
]
# add default emscripten settings
emscripten_settings = normalize_arg_list(args.emscripten_settings)
# set -s MALLOC
if args.wasm_malloc is not None:
add_default_definition(emscripten_settings, "MALLOC", args.wasm_malloc)
add_default_definition(emscripten_settings, "MALLOC", "dlmalloc")
# set -s STACK_SIZE=5242880
add_default_definition(emscripten_settings, "STACK_SIZE", "5242880")
if emscripten_settings:
cmake_args += [f"-Donnxruntime_EMSCRIPTEN_SETTINGS={';'.join(emscripten_settings)}"]
# Append onnxruntime-extensions cmake options
if args.use_extensions:
cmake_args += ["-Donnxruntime_USE_EXTENSIONS=ON"]
# default path of onnxruntime-extensions, using git submodule
for config in configs:
onnxruntime_extensions_path = os.path.join(build_dir, config, "_deps", "extensions-src")
onnxruntime_extensions_path = os.path.abspath(onnxruntime_extensions_path)
if args.extensions_overridden_path and os.path.exists(args.extensions_overridden_path):
# use absolute path here because onnxruntime-extensions is outside onnxruntime
onnxruntime_extensions_path = os.path.abspath(args.extensions_overridden_path)
cmake_args += ["-Donnxruntime_EXTENSIONS_OVERRIDDEN=ON"]
print("[onnxruntime-extensions] Loading onnxruntime-extensions from: ", onnxruntime_extensions_path)
else:
print("[onnxruntime-extensions] Loading onnxruntime-extensions from: FetchContent")
cmake_args += ["-Donnxruntime_EXTENSIONS_PATH=" + onnxruntime_extensions_path]
if is_reduced_ops_build(args):
operators_config_file = os.path.abspath(args.include_ops_by_config)
cmake_tool_dir = os.path.join(onnxruntime_extensions_path, "tools")
# generate _selectedoplist.cmake by operators config file
run_subprocess([sys.executable, "gen_selectedops.py", operators_config_file], cwd=cmake_tool_dir)
if path_to_protoc_exe:
cmake_args += [f"-DONNX_CUSTOM_PROTOC_EXECUTABLE={path_to_protoc_exe}"]
if args.fuzz_testing:
if not (
args.build_shared_lib
and is_windows()
and args.cmake_generator == "Visual Studio 17 2022"
and args.use_full_protobuf
):
raise BuildError("Fuzz test has only be tested with build shared libs option using MSVC on windows")
cmake_args += [
"-Donnxruntime_BUILD_UNIT_TESTS=ON",
"-Donnxruntime_FUZZ_TEST=ON",
"-Donnxruntime_USE_FULL_PROTOBUF=ON",
]
if args.enable_lazy_tensor:
import torch
cmake_args += ["-Donnxruntime_PREBUILT_PYTORCH_PATH=%s" % os.path.dirname(torch.__file__)]
cmake_args += ["-D_GLIBCXX_USE_CXX11_ABI=" + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
if args.use_azure:
add_default_definition(cmake_extra_defines, "onnxruntime_USE_AZURE", "ON")
if args.use_lock_free_queue:
add_default_definition(cmake_extra_defines, "onnxruntime_USE_LOCK_FREE_QUEUE", "ON")
cmake_args += [f"-D{define}" for define in cmake_extra_defines]
cmake_args += cmake_extra_args
# ADO pipelines will store the pipeline build number
# (e.g. 191101-2300.1.master) and source version in environment
# variables. If present, use these values to define the
# WinML/ORT DLL versions.
build_number = os.getenv("Build_BuildNumber") # noqa: SIM112
source_version = os.getenv("Build_SourceVersion") # noqa: SIM112
if build_number and source_version:
build_matches = re.fullmatch(r"(\d\d)(\d\d)(\d\d)(\d\d)\.(\d+)", build_number)
if build_matches:
YY = build_matches.group(2) # noqa: N806
MM = build_matches.group(3) # noqa: N806
DD = build_matches.group(4) # noqa: N806
# Get ORT major and minor number
with open(os.path.join(source_dir, "VERSION_NUMBER")) as f:
first_line = f.readline()
ort_version_matches = re.match(r"(\d+).(\d+)", first_line)
if not ort_version_matches:
raise BuildError("Couldn't read version from VERSION_FILE")
ort_major = ort_version_matches.group(1)
ort_minor = ort_version_matches.group(2)
# Example (BuildNumber: 191101-2300.1.master,
# SourceVersion: 0bce7ae6755c792eda558e5d27ded701707dc404)
# MajorPart = 1
# MinorPart = 0
# BuildPart = 1911
# PrivatePart = 123
# String = 191101-2300.1.master.0bce7ae
cmake_args += [
f"-DVERSION_MAJOR_PART={ort_major}",
f"-DVERSION_MINOR_PART={ort_minor}",
f"-DVERSION_BUILD_PART={YY}",
f"-DVERSION_PRIVATE_PART={MM}{DD}",
f"-DVERSION_STRING={ort_major}.{ort_minor}.{build_number}.{source_version[0:7]}",
]
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
if args.use_tvm:
os.environ["PATH"] = (
os.path.join(config_build_dir, "_deps", "tvm-build")
+ os.pathsep
+ os.path.join(config_build_dir, "_deps", "tvm-src")
+ os.pathsep
+ os.path.dirname(sys.executable)
+ os.pathsep
+ os.environ["PATH"]
)
preinstalled_dir = Path(build_dir) / config
run_subprocess(
[
*cmake_args,
"-Donnxruntime_ENABLE_MEMLEAK_CHECKER="
+ (
"ON"
if config.lower() == "debug"
and not args.use_tvm
and not args.use_openvino
and not args.use_gdk
and not args.enable_msvc_static_runtime
and not args.disable_memleak_checker
else "OFF"
),
f"-DCMAKE_BUILD_TYPE={config}",
f"-DCMAKE_PREFIX_PATH={build_dir}/{config}/installed"
if preinstalled_dir.exists() and not (args.arm64 or args.arm64ec or args.arm)
else "",
],
cwd=config_build_dir,
cuda_home=cuda_home,
)
def clean_targets(cmake_path, build_dir, configs):
for config in configs:
log.info("Cleaning targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path, "--build", build_dir2, "--config", config, "--target", "clean"]
run_subprocess(cmd_args)
def build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, target=None):
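    """Build the requested target(s) for each configuration with 'cmake --build', adding
    generator-specific parallelism flags and the Android SDK/NDK environment when needed."""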
for config in configs:
log.info("Building targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path, "--build", build_dir2, "--config", config]
if target:
cmd_args.extend(["--target", target])
build_tool_args = []
if num_parallel_jobs != 1:
if is_windows() and args.cmake_generator != "Ninja" and not args.build_wasm:
build_tool_args += [
f"/maxcpucount:{num_parallel_jobs}",
# if nodeReuse is true, msbuild processes will stay around for a bit after the build completes
"/nodeReuse:False",
f"/p:CL_MPCount={num_parallel_jobs}",
]
elif args.cmake_generator == "Xcode":
# CMake will generate correct build tool args for Xcode
cmd_args += ["--parallel", str(num_parallel_jobs)]
else:
build_tool_args += [f"-j{num_parallel_jobs}"]
if build_tool_args:
cmd_args += ["--"]
cmd_args += build_tool_args
env = {}
if args.android:
env["ANDROID_SDK_ROOT"] = args.android_sdk_path
env["ANDROID_NDK_HOME"] = args.android_ndk_path
run_subprocess(cmd_args, env=env)
def add_dir_if_exists(directory, dir_list):
if os.path.isdir(directory):
dir_list.append(directory)
def setup_cuda_vars(args):
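    """Resolve cuda_home/cudnn_home from the arguments or the CUDA_HOME/CUDNN_HOME environment
    variables and validate them when --use_cuda is given."""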
cuda_home = ""
cudnn_home = ""
if args.use_cuda:
cuda_home = args.cuda_home if args.cuda_home else os.getenv("CUDA_HOME")
cudnn_home = args.cudnn_home if args.cudnn_home else os.getenv("CUDNN_HOME")
cuda_home_valid = cuda_home is not None and os.path.exists(cuda_home)
cudnn_home_valid = cudnn_home is not None and os.path.exists(cudnn_home)
if not cuda_home_valid or (not is_windows() and not cudnn_home_valid):
raise BuildError(
"cuda_home and cudnn_home paths must be specified and valid.",
"cuda_home='{}' valid={}. cudnn_home='{}' valid={}".format(
cuda_home, cuda_home_valid, cudnn_home, cudnn_home_valid
),
)
return cuda_home, cudnn_home
def setup_cann_vars(args):
cann_home = ""
if args.use_cann:
cann_home = args.cann_home if args.cann_home else os.getenv("ASCEND_HOME_PATH")
cann_home_valid = cann_home is not None and os.path.exists(cann_home)
if not cann_home_valid:
raise BuildError(
"cann_home paths must be specified and valid.",
f"cann_home='{cann_home}' valid={cann_home_valid}.",
)
return cann_home
def setup_tensorrt_vars(args):
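    """Resolve and validate tensorrt_home and set the ORT_TENSORRT_* environment defaults used by the TensorRT EP."""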
tensorrt_home = ""
if args.use_tensorrt:
tensorrt_home = args.tensorrt_home if args.tensorrt_home else os.getenv("TENSORRT_HOME")
tensorrt_home_valid = tensorrt_home is not None and os.path.exists(tensorrt_home)
if not tensorrt_home_valid:
raise BuildError(
"tensorrt_home paths must be specified and valid.",
f"tensorrt_home='{tensorrt_home}' valid={tensorrt_home_valid}.",
)
# Set maximum workspace size in byte for
# TensorRT (1GB = 1073741824 bytes).
os.environ["ORT_TENSORRT_MAX_WORKSPACE_SIZE"] = "1073741824"
# Set maximum number of iterations to detect unsupported nodes
# and partition the models for TensorRT.
os.environ["ORT_TENSORRT_MAX_PARTITION_ITERATIONS"] = "1000"
# Set minimum subgraph node size in graph partitioning
# for TensorRT.
os.environ["ORT_TENSORRT_MIN_SUBGRAPH_SIZE"] = "1"
# Set FP16 flag
os.environ["ORT_TENSORRT_FP16_ENABLE"] = "0"
return tensorrt_home
def setup_migraphx_vars(args):
migraphx_home = None
if args.use_migraphx:
print(f"migraphx_home = {args.migraphx_home}")
migraphx_home = args.migraphx_home or os.getenv("MIGRAPHX_HOME") or None
migraphx_home_not_valid = migraphx_home and not os.path.exists(migraphx_home)
if migraphx_home_not_valid:
raise BuildError(
"migraphx_home paths must be specified and valid.",
f"migraphx_home='{migraphx_home}' valid={migraphx_home_not_valid}.",
)
return migraphx_home or ""
def setup_dml_build(args, cmake_path, build_dir, configs):
if not args.use_dml:
return
if args.dml_path:
for expected_file in ["bin/DirectML.dll", "lib/DirectML.lib", "include/DirectML.h"]:
file_path = os.path.join(args.dml_path, expected_file)
if not os.path.exists(file_path):
raise BuildError("dml_path is invalid.", f"dml_path='{args.dml_path}' expected_file='{file_path}'.")
elif not args.dml_external_project:
for config in configs:
# Run the RESTORE_PACKAGES target to perform the initial
# NuGet setup.
cmd_args = [
cmake_path,
"--build",
get_config_build_dir(build_dir, config),
"--config",
config,
"--target",
"RESTORE_PACKAGES",
]
run_subprocess(cmd_args)
def setup_rocm_build(args):
rocm_home = None
if args.use_rocm:
print(f"rocm_home = {args.rocm_home}")
rocm_home = args.rocm_home or None
rocm_home_not_valid = rocm_home and not os.path.exists(rocm_home)
if rocm_home_not_valid:
raise BuildError(
"rocm_home paths must be specified and valid.",
f"rocm_home='{rocm_home}' valid={rocm_home_not_valid}.",
)
return rocm_home or ""
def run_android_tests(args, source_dir, build_dir, config, cwd):
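    """Push the test binaries and data to an Android device/emulator via adb and run them there
    (only when targeting the x86_64 ABI)."""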
sdk_tool_paths = android.get_sdk_tool_paths(args.android_sdk_path)
device_dir = "/data/local/tmp"
def adb_push(src, dest, **kwargs):
return run_subprocess([sdk_tool_paths.adb, "push", src, dest], **kwargs)
def adb_shell(*args, **kwargs):
return run_subprocess([sdk_tool_paths.adb, "shell", *args], **kwargs)
def adb_install(*args, **kwargs):
return run_subprocess([sdk_tool_paths.adb, "install", *args], **kwargs)
def run_adb_shell(cmd):
# GCOV_PREFIX_STRIP specifies the depth of the directory hierarchy to strip and
# GCOV_PREFIX specifies the root directory
# for creating the runtime code coverage files.
if args.code_coverage:
adb_shell(
"cd {0} && GCOV_PREFIX={0} GCOV_PREFIX_STRIP={1} {2}".format(device_dir, cwd.count(os.sep) + 1, cmd)
)
else:
adb_shell(f"cd {device_dir} && {cmd}")
if args.android_abi == "x86_64":
with contextlib.ExitStack() as context_stack:
if args.android_run_emulator:
avd_name = "ort_android"
system_image = f"system-images;android-{args.android_api};default;{args.android_abi}"
android.create_virtual_device(sdk_tool_paths, system_image, avd_name)
emulator_proc = context_stack.enter_context(
android.start_emulator(
sdk_tool_paths=sdk_tool_paths,
avd_name=avd_name,
extra_args=["-partition-size", "2047", "-wipe-data"],
)
)
context_stack.callback(android.stop_emulator, emulator_proc)
adb_push("testdata", device_dir, cwd=cwd)
adb_push(
os.path.join(source_dir, "cmake", "external", "onnx", "onnx", "backend", "test"), device_dir, cwd=cwd
)
adb_push("onnxruntime_test_all", device_dir, cwd=cwd)
adb_shell(f"chmod +x {device_dir}/onnxruntime_test_all")
adb_push("onnx_test_runner", device_dir, cwd=cwd)
adb_shell(f"chmod +x {device_dir}/onnx_test_runner")
run_adb_shell(f"{device_dir}/onnxruntime_test_all")
if args.build_java:
# use the gradle wrapper under <repo root>/java
gradle_executable = os.path.join(source_dir, "java", "gradlew.bat" if is_windows() else "gradlew")
android_test_path = os.path.join(cwd, "java", "androidtest", "android")
run_subprocess(
[
gradle_executable,
"--no-daemon",
f"-DminSdkVer={args.android_api}",
"clean",
"connectedDebugAndroidTest",
],
cwd=android_test_path,
)
if args.use_nnapi:
run_adb_shell("{0}/onnx_test_runner -e nnapi {0}/test".format(device_dir))
else:
run_adb_shell("{0}/onnx_test_runner {0}/test".format(device_dir))
# run shared_lib_test if necessary
if args.build_shared_lib:
adb_push("libonnxruntime.so", device_dir, cwd=cwd)
adb_push("onnxruntime_shared_lib_test", device_dir, cwd=cwd)
adb_push("libcustom_op_library.so", device_dir, cwd=cwd)
adb_push("libcustom_op_get_const_input_test_library.so", device_dir, cwd=cwd)
adb_push("onnxruntime_customopregistration_test", device_dir, cwd=cwd)
adb_shell(f"chmod +x {device_dir}/onnxruntime_shared_lib_test")
adb_shell(f"chmod +x {device_dir}/onnxruntime_customopregistration_test")
run_adb_shell("LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} {0}/onnxruntime_shared_lib_test".format(device_dir))
run_adb_shell(
"LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} {0}/onnxruntime_customopregistration_test".format(device_dir)
)
def run_ios_tests(args, source_dir, config, cwd):
simulator_device_name = subprocess.check_output(
["bash", os.path.join(source_dir, "tools", "ci_build", "github", "apple", "get_simulator_device_name.sh")],
text=True,
).strip()
xc_test_schemes = [
"onnxruntime_test_all_xc",
]
if args.build_shared_lib:
xc_test_schemes += [
"onnxruntime_shared_lib_test_xc",
"onnxruntime_customopregistration_test_xc",
]
for xc_test_scheme in xc_test_schemes:
run_subprocess(
[
"xcodebuild",
"test-without-building",
"-project",
"./onnxruntime.xcodeproj",
"-configuration",
config,
"-scheme",
xc_test_scheme,
"-destination",
f"platform=iOS Simulator,OS=latest,name={simulator_device_name}",
],
cwd=cwd,
)
if args.build_apple_framework:
package_test_py = os.path.join(source_dir, "tools", "ci_build", "github", "apple", "test_ios_packages.py")
framework_info_file = os.path.join(cwd, "framework_info.json")
dynamic_framework_dir = os.path.join(cwd, config + "-" + args.ios_sysroot)
static_framework_dir = os.path.join(cwd, config + "-" + args.ios_sysroot, "static_framework")
# test dynamic framework
run_subprocess(
[
sys.executable,
package_test_py,
"--c_framework_dir",
dynamic_framework_dir,
"--framework_info_file",
framework_info_file,
],
cwd=cwd,
)
# test static framework
run_subprocess(
[
sys.executable,
package_test_py,
"--c_framework_dir",
static_framework_dir,
"--framework_info_file",
framework_info_file,
],
cwd=cwd,
)
def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):
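    """Run the native test suite (via CTest or by invoking the test binaries directly) and, when
    Python bindings are enabled, the Python tests for each configuration."""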
for config in configs:
log.info("Running tests for %s configuration", config)
cwd = get_config_build_dir(build_dir, config)
cwd = os.path.abspath(cwd)
if args.android:
run_android_tests(args, source_dir, build_dir, config, cwd)
continue
elif args.ios:
run_ios_tests(args, source_dir, config, cwd)
continue
dll_path_list = []
if args.use_tensorrt:
dll_path_list.append(os.path.join(args.tensorrt_home, "lib"))
dll_path = None
if len(dll_path_list) > 0:
dll_path = os.pathsep.join(dll_path_list)
if not ctest_path and not is_windows():
executables = ["onnxruntime_test_all", "onnxruntime_mlas_test"]
if args.build_shared_lib:
executables.append("onnxruntime_shared_lib_test")
executables.append("onnxruntime_global_thread_pools_test")
executables.append("onnxruntime_customopregistration_test")
for exe in executables:
test_output = f"--gtest_output=xml:{cwd}/{exe}.{config}.results.xml"
run_subprocess([os.path.join(cwd, exe), test_output], cwd=cwd, dll_path=dll_path)
else:
ctest_cmd = [ctest_path, "--build-config", config, "--verbose", "--timeout", args.test_all_timeout]
run_subprocess(ctest_cmd, cwd=cwd, dll_path=dll_path)
if args.enable_pybind:
python_path = None
if args.use_tvm:
python_path = str((Path(build_dir) / config / "_deps" / "tvm-src" / "python").resolve())
# Disable python tests in a reduced build as we don't know which ops have been included and which
# models can run.
if is_reduced_ops_build(args) or args.minimal_build is not None:
return
if is_windows():
cwd = os.path.join(cwd, config)
run_subprocess(
[sys.executable, "onnxruntime_test_python.py"], cwd=cwd, dll_path=dll_path, python_path=python_path
)
if not args.disable_contrib_ops:
run_subprocess([sys.executable, "onnxruntime_test_python_sparse_matmul.py"], cwd=cwd, dll_path=dll_path)
if args.enable_symbolic_shape_infer_tests:
run_subprocess(
[sys.executable, "onnxruntime_test_python_symbolic_shape_infer.py"], cwd=cwd, dll_path=dll_path
)
# For CUDA enabled builds test IOBinding feature
if args.use_cuda:
# We need to have Torch installed to test the IOBinding feature
# which currently uses Torch's allocator to allocate GPU memory for testing
log.info("Testing IOBinding feature")
run_subprocess([sys.executable, "onnxruntime_test_python_iobinding.py"], cwd=cwd, dll_path=dll_path)
log.info("Testing CUDA Graph feature")
run_subprocess([sys.executable, "onnxruntime_test_python_cudagraph.py"], cwd=cwd, dll_path=dll_path)
if not args.disable_ml_ops and not args.use_tensorrt:
run_subprocess([sys.executable, "onnxruntime_test_python_mlops.py"], cwd=cwd, dll_path=dll_path)
try:
import onnx # noqa: F401
onnx_test = True
except ImportError as error:
log.exception(error)
log.warning("onnx is not installed. The ONNX tests will be skipped.")
onnx_test = False
if onnx_test:
# Disable python onnx tests for TensorRT and CANN EP, because many tests are
# not supported yet.
if args.use_tensorrt or args.use_cann:
return
run_subprocess(
[sys.executable, "onnxruntime_test_python_backend.py"],
cwd=cwd,
dll_path=dll_path,
python_path=python_path,
)
if not args.disable_contrib_ops:
run_subprocess(
[sys.executable, "-m", "unittest", "discover", "-s", "quantization"], cwd=cwd, dll_path=dll_path
)
if args.enable_transformers_tool_test:
import google.protobuf
import numpy
numpy_init_version = numpy.__version__
pb_init_version = google.protobuf.__version__
run_subprocess(
[sys.executable, "-m", "pip", "install", "-r", "requirements.txt"], cwd=SCRIPT_DIR
)
run_subprocess([sys.executable, "-m", "pytest", "transformers"], cwd=cwd)
# Restore initial numpy/protobuf version in case other tests use it
run_subprocess([sys.executable, "-m", "pip", "install", "numpy==" + numpy_init_version])
run_subprocess([sys.executable, "-m", "pip", "install", "protobuf==" + pb_init_version])
if not args.disable_ml_ops:
run_subprocess(
[sys.executable, "onnxruntime_test_python_backend_mlops.py"], cwd=cwd, dll_path=dll_path
)
run_subprocess(
[
sys.executable,
os.path.join(source_dir, "onnxruntime", "test", "onnx", "gen_test_models.py"),
"--output_dir",
"test_models",
],
cwd=cwd,
)
if not args.skip_onnx_tests:
run_subprocess([os.path.join(cwd, "onnx_test_runner"), "test_models"], cwd=cwd)
if config != "Debug":
run_subprocess([sys.executable, "onnx_backend_test_series.py"], cwd=cwd, dll_path=dll_path)
if not args.skip_keras_test:
try:
import keras # noqa: F401
import onnxmltools # noqa: F401
onnxml_test = True
except ImportError:
log.warning("onnxmltools and keras are not installed. The keras tests will be skipped.")
onnxml_test = False
if onnxml_test:
run_subprocess([sys.executable, "onnxruntime_test_python_keras.py"], cwd=cwd, dll_path=dll_path)
def tvm_run_python_tests(build_dir, configs):
for config in configs:
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
python_path = os.path.join(build_dir, config, "_deps", "tvm-src", "python")
run_subprocess(
[sys.executable, "onnxruntime_test_python_tvm.py"], cwd=cwd, python_path=os.path.abspath(python_path)
)
def run_nodejs_tests(nodejs_binding_dir):
args = ["npm", "test", "--", "--timeout=90000"]
if is_windows():
args = ["cmd", "/c", *args]
run_subprocess(args, cwd=nodejs_binding_dir)
def build_python_wheel(
source_dir,
build_dir,
configs,
use_cuda,
cuda_version,
use_rocm,
rocm_version,
use_dnnl,
use_tensorrt,
use_openvino,
use_tvm,
use_vitisai,
use_acl,
use_armnn,
use_dml,
use_cann,
use_azure,
use_qnn,
wheel_name_suffix,
enable_training,
nightly_build=False,
default_training_package_device=False,
use_ninja=False,
enable_training_apis=False,
enable_rocm_profiling=False,
):
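    """Build the onnxruntime Python wheel via 'setup.py bdist_wheel', forwarding EP-specific naming options."""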
for config in configs:
cwd = get_config_build_dir(build_dir, config)
if is_windows() and not use_ninja:
cwd = os.path.join(cwd, config)
args = [sys.executable, os.path.join(source_dir, "setup.py"), "bdist_wheel"]
# Any combination of the following arguments can be applied
if nightly_build:
args.append("--nightly_build")
if default_training_package_device:
args.append("--default_training_package_device")
if wheel_name_suffix:
args.append(f"--wheel_name_suffix={wheel_name_suffix}")
if enable_training:
args.append("--enable_training")
if enable_training_apis:
args.append("--enable_training_apis")
if enable_rocm_profiling:
args.append("--enable_rocm_profiling")
# The following arguments are mutually exclusive
if use_cuda:
# The following line assumes no other EP is enabled
args.append("--wheel_name_suffix=gpu")
if cuda_version:
args.append(f"--cuda_version={cuda_version}")
elif use_rocm:
args.append("--use_rocm")
if rocm_version:
args.append(f"--rocm_version={rocm_version}")
elif use_openvino:
args.append("--use_openvino")
elif use_dnnl:
args.append("--use_dnnl")
elif use_tvm:
args.append("--use_tvm")
elif use_vitisai:
args.append("--use_vitisai")
elif use_acl:
args.append("--use_acl")
elif use_armnn:
args.append("--use_armnn")
elif use_dml:
args.append("--wheel_name_suffix=directml")
elif use_cann:
args.append("--use_cann")
elif use_azure:
args.append("--use_azure")
elif use_qnn:
args.append("--use_qnn")
run_subprocess(args, cwd=cwd)
def derive_linux_build_property():
if is_windows():
return '/p:IsLinuxBuild="false"'
else:
return '/p:IsLinuxBuild="true"'
def build_nuget_package(
cmake_path,
source_dir,
build_dir,
configs,
use_cuda,
use_rocm,
use_openvino,
use_tensorrt,
use_dnnl,
use_tvm,
use_winml,
use_snpe,
use_qnn,
enable_training_apis,
msbuild_extra_options,
):
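    """Build the C# bindings and create the NuGet package(s) for each configuration (Windows and Linux only)."""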
if not (is_windows() or is_linux()):
raise BuildError(
"Currently csharp builds and nuget package creation is only supportted on Windows and Linux platforms."
)
csharp_build_dir = os.path.join(source_dir, "csharp")
is_linux_build = derive_linux_build_property()
# in most cases we don't want/need to include the Xamarin mobile targets, as doing so means the Xamarin
# mobile workloads must be installed on the machine.
# they are only included in the Microsoft.ML.OnnxRuntime nuget package
sln = "OnnxRuntime.DesktopOnly.CSharp.sln"
# derive package name and execution provider based on the build args
target_name = "/t:CreatePackage"
execution_provider = '/p:ExecutionProvider="None"'
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime"'
enable_training_tests = '/p:TrainingEnabledNativeBuild="false"'
if enable_training_apis:
enable_training_tests = '/p:TrainingEnabledNativeBuild="true"'
if use_cuda:
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.Training.Gpu"'
else:
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.Training"'
elif use_winml:
package_name = '/p:OrtPackageId="Microsoft.AI.MachineLearning"'
target_name = "/t:CreateWindowsAIPackage"
elif use_openvino:
execution_provider = '/p:ExecutionProvider="openvino"'
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.OpenVino"'
elif use_tensorrt:
execution_provider = '/p:ExecutionProvider="tensorrt"'
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.TensorRT"'
elif use_dnnl:
execution_provider = '/p:ExecutionProvider="dnnl"'
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.DNNL"'
elif use_cuda:
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.Gpu"'
elif use_rocm:
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.ROCm"'
elif use_tvm:
execution_provider = '/p:ExecutionProvider="tvm"'
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.Tvm"'
elif use_snpe:
execution_provider = '/p:ExecutionProvider="snpe"'
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.Snpe"'
elif use_qnn:
execution_provider = '/p:ExecutionProvider="qnn"'
package_name = '/p:OrtPackageId="Microsoft.ML.OnnxRuntime.QNN"'
    elif any("OrtPackageId=" in x for x in msbuild_extra_options):
pass
else:
# use the solution file that includes Xamarin mobile targets
sln = "OnnxRuntime.CSharp.sln"
# set build directory based on build_dir arg
native_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = '/p:OnnxRuntimeBuildDirectory="' + native_dir + '"'
# dotnet restore
cmd_args = ["dotnet", "restore", sln, "--configfile", "NuGet.CSharp.config"]
run_subprocess(cmd_args, cwd=csharp_build_dir)
# build csharp bindings and create nuget package for each config
for config in configs:
if is_linux():
native_build_dir = os.path.join(native_dir, config)
cmd_args = [cmake_path, "-DCMAKE_INSTALL_PREFIX=./nuget-staging/usr/local", "-Pcmake_install.cmake"]
run_subprocess(cmd_args, cwd=native_build_dir)
configuration = '/p:Configuration="' + config + '"'
if not use_winml:
cmd_args = [
"dotnet",
"msbuild",
sln,
configuration,
package_name,
is_linux_build,
ort_build_dir,
enable_training_tests,
]
run_subprocess(cmd_args, cwd=csharp_build_dir)
else:
winml_interop_dir = os.path.join(source_dir, "csharp", "src", "Microsoft.AI.MachineLearning.Interop")
winml_interop_project = os.path.join(winml_interop_dir, "Microsoft.AI.MachineLearning.Interop.csproj")
winml_interop_project = os.path.normpath(winml_interop_project)
cmd_args = [
"dotnet",
"msbuild",
winml_interop_project,
configuration,
'/p:Platform="Any CPU"',
ort_build_dir,
"-restore",
]
run_subprocess(cmd_args, cwd=csharp_build_dir)
if is_windows():
if not use_winml:
                # The user needs to make sure nuget.exe is installed and added to the PATH variable.
nuget_exe = "nuget.exe"
else:
                # This path is set up by cmake/nuget_helpers.cmake for MSVC on Windows.
nuget_exe = os.path.normpath(os.path.join(native_dir, config, "nuget_exe", "src", "nuget.exe"))
else:
# user needs to make sure nuget is installed and can be found
nuget_exe = "nuget"
nuget_exe_arg = '/p:NugetExe="' + nuget_exe + '"'
cmd_args = [
"dotnet",
"msbuild",
"OnnxRuntime.CSharp.proj",
target_name,
package_name,
configuration,
execution_provider,
is_linux_build,
ort_build_dir,
nuget_exe_arg,
]
cmd_args.extend(msbuild_extra_options)
run_subprocess(cmd_args, cwd=csharp_build_dir)
def run_csharp_tests(source_dir, build_dir, use_cuda, use_openvino, use_tensorrt, use_dnnl, enable_training_apis):
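    """Build and run the C# unit tests against the native build output."""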
# Currently only running tests on windows.
if not is_windows():
return
csharp_source_dir = os.path.join(source_dir, "csharp")
# define macros based on build args
macros = ""
if use_openvino:
macros += "USE_OPENVINO;"
if use_tensorrt:
macros += "USE_TENSORRT;"
if use_dnnl:
macros += "USE_DNNL;"
if use_cuda:
macros += "USE_CUDA;"
if enable_training_apis:
macros += "__TRAINING_ENABLED_NATIVE_BUILD__;__ENABLE_TRAINING_APIS__"
define_constants = ""
if macros:
define_constants = '/p:DefineConstants="' + macros + '"'
# set build directory based on build_dir arg
native_build_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = '/p:OnnxRuntimeBuildDirectory="' + native_build_dir + '"'
# Skip pretrained models test. Only run unit tests as part of the build
# add "--verbosity", "detailed" to this command if required
cmd_args = [
"dotnet",
"test",
"test\\Microsoft.ML.OnnxRuntime.Tests.NetCoreApp\\Microsoft.ML.OnnxRuntime.Tests.NetCoreApp.csproj",
"--filter",
"FullyQualifiedName!=Microsoft.ML.OnnxRuntime.Tests.InferenceTest.TestPreTrainedModels",
define_constants,
ort_build_dir,
]
run_subprocess(cmd_args, cwd=csharp_source_dir)
def is_cross_compiling_on_apple(args):
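    """Return True when building on macOS for a different Apple target (iOS or a different osx_arch)."""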
if not is_macOS():
return False
if args.ios:
return True
if args.osx_arch != platform.machine():
return True
return False
def generate_documentation(source_dir, build_dir, configs, validate):
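    """Generate ContribOperators.md and OperatorKernels.md, optionally validating the checked-in copies."""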
# Randomly choose one build config
config = next(iter(configs))
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
contrib_op_doc_path = os.path.join(source_dir, "docs", "ContribOperators.md")
opkernel_doc_path = os.path.join(source_dir, "docs", "OperatorKernels.md")
shutil.copy(os.path.join(source_dir, "tools", "python", "gen_contrib_doc.py"), cwd)
shutil.copy(os.path.join(source_dir, "tools", "python", "gen_opkernel_doc.py"), cwd)
# limit to just com.microsoft (excludes purely internal stuff like com.microsoft.nchwc).
run_subprocess(
[sys.executable, "gen_contrib_doc.py", "--output_path", contrib_op_doc_path, "--domains", "com.microsoft"],
cwd=cwd,
)
    # we currently limit the documentation created by a build to a subset of EPs.
    # Run gen_opkernel_doc.py directly if you need/want documentation from other EPs that are enabled in the build.
run_subprocess(
[
sys.executable,
"gen_opkernel_doc.py",
"--output_path",
opkernel_doc_path,
"--providers",
"CPU",
"CUDA",
"DML",
],
cwd=cwd,
)
if validate:
try:
have_diff = False
def diff_file(path, regenerate_qualifiers=""):
diff = subprocess.check_output(["git", "diff", path], cwd=source_dir).decode("utf-8")
if diff:
nonlocal have_diff
have_diff = True
log.warning(
"The updated document {} is different from the checked in version. "
"Please regenerate the file{}, or copy the updated version from the "
"CI build's published artifacts if applicable.".format(path, regenerate_qualifiers)
)
log.debug("diff:\n" + diff)
diff_file(opkernel_doc_path, " with CPU, CUDA and DML execution providers enabled")
diff_file(contrib_op_doc_path)
if have_diff:
# Output for the CI to publish the updated md files as an artifact
print("##vso[task.setvariable variable=DocUpdateNeeded]true")
raise BuildError("Generated documents have diffs. Check build output for details.")
except subprocess.CalledProcessError:
raise BuildError("git diff returned non-zero error code") # noqa: B904
def main():
log.debug("Command line arguments:\n {}".format(" ".join(shlex.quote(arg) for arg in sys.argv[1:])))
args = parse_arguments()
if os.getenv("ORT_BUILD_WITH_CACHE") == "1":
args.use_cache = True
if not is_windows():
if not args.allow_running_as_root:
is_root_user = os.geteuid() == 0
if is_root_user:
raise BuildError(
"Running as root is not allowed. If you really want to do that, use '--allow_running_as_root'."
)
cmake_extra_defines = normalize_arg_list(args.cmake_extra_defines)
cross_compiling = args.arm or args.arm64 or args.arm64ec or args.android
# If there was no explicit argument saying what to do, default
# to update, build and test (for native builds).
if not (args.update or args.clean or args.build or args.test or args.gen_doc):
log.debug("Defaulting to running update, build [and test for native builds].")
args.update = True
args.build = True
if cross_compiling:
args.test = args.android_abi == "x86_64" or args.android_abi == "arm64-v8a"
else:
args.test = True
if args.skip_tests:
args.test = False
if args.use_tensorrt:
args.use_cuda = True
if args.use_migraphx:
args.use_rocm = True
if args.build_wheel or args.gen_doc or args.use_tvm:
args.enable_pybind = True
if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:
args.build_shared_lib = True
if args.build_nuget and cross_compiling:
raise BuildError("Currently nuget package creation is not supported while cross-compiling")
if args.enable_pybind and args.disable_rtti:
raise BuildError("Python bindings use typeid so you can't disable RTTI")
if args.enable_pybind and args.disable_exceptions:
raise BuildError("Python bindings require exceptions to be enabled.")
if args.nnapi_min_api:
if not args.use_nnapi:
raise BuildError("Using --nnapi_min_api requires --use_nnapi")
if args.nnapi_min_api < 27:
raise BuildError("--nnapi_min_api should be 27+")
if args.build_wasm_static_lib:
args.build_wasm = True
if args.build_wasm:
if not args.disable_wasm_exception_catching and args.disable_exceptions:
# When '--disable_exceptions' is set, we set '--disable_wasm_exception_catching' as well
args.disable_wasm_exception_catching = True
if args.test and args.disable_wasm_exception_catching and not args.minimal_build:
raise BuildError("WebAssembly tests need exception catching enabled to run if it's not minimal build")
if args.test and args.enable_wasm_debug_info:
# With flag --enable_wasm_debug_info, onnxruntime_test_all.wasm will be very huge (>1GB). This will fail
# Node.js when trying to load the .wasm file.
# To debug ONNX Runtime WebAssembly, use ONNX Runtime Web to debug ort-wasm.wasm in browsers.
raise BuildError("WebAssembly tests cannot be enabled with flag --enable_wasm_debug_info")
if args.wasm_malloc is not None:
# mark --wasm_malloc as deprecated
log.warning(
"Flag '--wasm_malloc=<Value>' is deprecated. Please use '--emscripten_settings MALLOC=<Value>'."
)
if args.code_coverage and not args.android:
raise BuildError("Using --code_coverage requires --android")
if args.gen_api_doc and len(args.config) != 1:
raise BuildError("Using --get-api-doc requires a single build config")
# Disabling unit tests for GPU on nuget creation
if args.use_openvino != "CPU_FP32" and args.build_nuget:
args.test = False
# GDK builds don't support testing
if args.use_gdk:
args.test = False
# enable_training is a higher level flag that enables all training functionality.
if args.enable_training:
args.enable_training_apis = True
args.enable_training_ops = True
configs = set(args.config)
# setup paths and directories
# cmake_path and ctest_path can be None. For example, if a person only wants to run the tests, he/she doesn't need
# to have cmake/ctest.
cmake_path = resolve_executable_path(args.cmake_path)
ctest_path = resolve_executable_path(args.ctest_path)
build_dir = args.build_dir
script_dir = os.path.realpath(os.path.dirname(__file__))
source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))
# if using cuda, setup cuda paths and env vars
cuda_home, cudnn_home = setup_cuda_vars(args)
mpi_home = args.mpi_home
nccl_home = args.nccl_home
snpe_root = args.snpe_root
acl_home = args.acl_home
acl_libs = args.acl_libs
armnn_home = args.armnn_home
armnn_libs = args.armnn_libs
qnn_home = args.qnn_home
# if using tensorrt, setup tensorrt paths
tensorrt_home = setup_tensorrt_vars(args)
# if using migraphx, setup migraphx paths
migraphx_home = setup_migraphx_vars(args)
# if using rocm, setup rocm paths
rocm_home = setup_rocm_build(args)
# if using cann, setup cann paths
cann_home = setup_cann_vars(args)
if args.update or args.build:
for config in configs:
os.makedirs(get_config_build_dir(build_dir, config), exist_ok=True)
log.info("Build started")
if args.update:
if is_reduced_ops_build(args):
from reduce_op_kernels import reduce_ops
is_extended_minimal_build_or_higher = args.minimal_build is None or "extended" in args.minimal_build
for config in configs:
reduce_ops(
config_path=args.include_ops_by_config,
build_dir=get_config_build_dir(build_dir, config),
enable_type_reduction=args.enable_reduced_operator_type_support,
use_cuda=args.use_cuda,
is_extended_minimal_build_or_higher=is_extended_minimal_build_or_higher,
)
cmake_extra_args = []
path_to_protoc_exe = None
if args.path_to_protoc_exe:
path_to_protoc_exe = Path(args.path_to_protoc_exe)
if not path_to_protoc_exe.exists():
raise BuildError("The value to --path_to_protoc_exe is invalid.")
if not args.skip_submodule_sync:
update_submodules(source_dir)
if is_windows() and not args.build_wasm:
cpu_arch = platform.architecture()[0]
if args.cmake_generator == "Ninja":
if cpu_arch == "32bit" or args.arm or args.arm64 or args.arm64ec:
raise BuildError(
"To cross-compile with Ninja, load the toolset "
"environment for the target processor (e.g. Cross "
"Tools Command Prompt for VS)"
)
cmake_extra_args = ["-G", args.cmake_generator]
elif args.arm or args.arm64 or args.arm64ec:
if args.arm:
cmake_extra_args = ["-A", "ARM"]
elif args.arm64:
cmake_extra_args = ["-A", "ARM64"]
elif args.arm64ec:
cmake_extra_args = ["-A", "ARM64EC"]
cmake_extra_args += ["-G", args.cmake_generator]
# Cannot test on host build machine for cross-compiled
# builds (Override any user-defined behaviour for test if any)
if args.test:
log.warning(
"Cannot test on host build machine for cross-compiled "
"ARM(64) builds. Will skip test running after build."
)
args.test = False
else:
target_arch = platform.machine()
if target_arch == "AMD64":
if cpu_arch == "32bit" or args.x86:
target_arch = "Win32"
else:
target_arch = "x64"
host_arch = "x64"
elif target_arch == "ARM64":
host_arch = "ARM64"
else:
raise BuildError("unknown python arch")
if args.msvc_toolset:
toolset = "host=" + host_arch + ",version=" + args.msvc_toolset
else:
toolset = "host=" + host_arch
if args.cuda_version:
toolset += ",cuda=" + args.cuda_version
elif args.cuda_home:
toolset += ",cuda=" + args.cuda_home
cmake_extra_args = ["-A", target_arch, "-T", toolset, "-G", args.cmake_generator]
if args.enable_wcos:
cmake_extra_defines.append("CMAKE_USER_MAKE_RULES_OVERRIDE=wcos_rules_override.cmake")
elif args.cmake_generator is not None:
cmake_extra_args += ["-G", args.cmake_generator]
if is_macOS():
if not args.ios and not args.android and args.osx_arch == "arm64" and platform.machine() == "x86_64":
if args.test:
log.warning("Cannot test ARM64 build on X86_64. Will skip test running after build.")
args.test = False
if args.build_wasm:
if is_windows() and platform.architecture()[0] == "32bit":
raise BuildError("Please use a 64-bit python to run this script")
if args.build_wheel or args.enable_pybind:
raise BuildError("WASM does not support pybind")
emsdk_version = args.emsdk_version
emsdk_dir = os.path.join(source_dir, "cmake", "external", "emsdk")
emsdk_file = os.path.join(emsdk_dir, "emsdk.bat") if is_windows() else os.path.join(emsdk_dir, "emsdk")
log.info("Installing emsdk...")
run_subprocess([emsdk_file, "install", emsdk_version], cwd=emsdk_dir)
log.info("Activating emsdk...")
run_subprocess([emsdk_file, "activate", emsdk_version], cwd=emsdk_dir)
if is_ubuntu_1604():
if args.arm or args.arm64:
raise BuildError("Only Windows ARM(64) cross-compiled builds supported currently through this script")
if not is_docker() and not args.use_acl and not args.use_armnn:
install_python_deps()
if args.enable_pybind and is_windows():
install_python_deps(args.numpy_version)
if args.use_rocm and args.rocm_version is None:
args.rocm_version = ""
if args.enable_external_custom_op_schemas and not is_linux():
raise BuildError("Registering external custom op schemas is only supported on Linux.")
generate_build_tree(
cmake_path,
source_dir,
build_dir,
cuda_home,
cudnn_home,
rocm_home,
mpi_home,
nccl_home,
tensorrt_home,
migraphx_home,
acl_home,
acl_libs,
armnn_home,
armnn_libs,
qnn_home,
snpe_root,
cann_home,
path_to_protoc_exe,
configs,
cmake_extra_defines,
args,
cmake_extra_args,
)
if args.clean:
clean_targets(cmake_path, build_dir, configs)
# if using DML, perform initial nuget package restore
setup_dml_build(args, cmake_path, build_dir, configs)
if args.build:
if args.parallel < 0:
raise BuildError(f"Invalid parallel job count: {args.parallel}")
num_parallel_jobs = os.cpu_count() if args.parallel == 0 else args.parallel
build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, args.target)
if args.test:
if args.enable_onnx_tests:
source_onnx_model_dir = "C:\\local\\models" if is_windows() else "/data/models"
setup_test_data(source_onnx_model_dir, "models", build_dir, configs)
run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs)
        # TODO(agladyshev):
        #   to support Windows, we need to update .github/workflows/windows.yml
        #   and add the following value to the PATH variable: C:\Program Files\LLVM\bin
if args.enable_pybind and args.use_tvm and not is_windows():
tvm_run_python_tests(build_dir, configs)
# run node.js binding tests
if args.build_nodejs and not args.skip_nodejs_tests:
nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, "js", "node"))
run_nodejs_tests(nodejs_binding_dir)
# Build packages after running the tests.
    # NOTE: if you have a test that relies on a file which only gets copied/generated during the packaging step, it
    # could fail unexpectedly. Similarly, if your packaging step forgets to copy a file into the package, we won't
    # know that either.
if args.build:
if args.build_wheel:
nightly_build = bool(os.getenv("NIGHTLY_BUILD") == "1")
default_training_package_device = bool(os.getenv("DEFAULT_TRAINING_PACKAGE_DEVICE") == "1")
build_python_wheel(
source_dir,
build_dir,
configs,
args.use_cuda,
args.cuda_version,
args.use_rocm,
args.rocm_version,
args.use_dnnl,
args.use_tensorrt,
args.use_openvino,
args.use_tvm,
args.use_vitisai,
args.use_acl,
args.use_armnn,
args.use_dml,
args.use_cann,
args.use_azure,
args.use_qnn,
args.wheel_name_suffix,
args.enable_training,
nightly_build=nightly_build,
default_training_package_device=default_training_package_device,
use_ninja=(args.cmake_generator == "Ninja"),
enable_training_apis=args.enable_training_apis,
enable_rocm_profiling=args.enable_rocm_profiling,
)
if args.build_nuget:
build_nuget_package(
cmake_path,
source_dir,
build_dir,
configs,
args.use_cuda,
args.use_rocm,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl,
args.use_tvm,
args.use_winml,
args.use_snpe,
args.use_qnn,
args.enable_training_apis,
normalize_arg_list(args.msbuild_extra_options),
)
if args.test and args.build_nuget:
run_csharp_tests(
source_dir,
build_dir,
args.use_cuda,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl,
args.enable_training_apis,
)
if args.gen_doc:
        # special case CI where we create the build config separately from building
if args.update and not args.build:
pass
else:
# assumes build has occurred for easier use in CI where we don't always build via build.py and need to run
# documentation generation as a separate task post-build
generate_documentation(source_dir, build_dir, configs, args.gen_doc == "validate")
if args.gen_api_doc and (args.build or args.test):
print("Generating Python doc for ORTModule...")
docbuild_dir = os.path.join(source_dir, "tools", "doc")
run_subprocess(
["bash", "builddoc.sh", os.path.dirname(sys.executable), source_dir, build_dir, args.config[0]],
cwd=docbuild_dir,
)
log.info("Build complete")
if __name__ == "__main__":
try:
sys.exit(main())
except BaseError as e:
log.error(str(e))
sys.exit(1)
| 113,014 | 42.036938 | 120 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/linux/ort_minimal/readelf_utils.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Utilities to help analyze the sections in a binary using readelf.
"""
import argparse
import collections
import os
import re
import subprocess
import sys
def get_section_sizes(binary_path, readelf_path, dump_to_file=None):
"""
Get the size of each section using readelf.
:param binary_path: Path to binary to analyze.
    :param readelf_path: Path to readelf binary.
    :param dump_to_file: File object to write section sizes and diagnostic info to. Defaults to None.
    :return: Dict mapping section name to size in bytes.
"""
cmd = [readelf_path, "--sections", "--wide", binary_path]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
result.check_returncode()
output = result.stdout.decode("utf-8")
section_sizes = {}
# Parse output in this format:
# [Nr] Name Type Address Off Size ES Flg Lk Inf Al
for match in re.finditer(r"\[[\s\d]+\] (\..*)$", output, re.MULTILINE):
items = match.group(1).split()
name = items[0]
# convert size from hex to int
size = int(items[4], 16)
section_sizes[name] = size
if dump_to_file:
print(f"{name}:{size}", file=dump_to_file)
return section_sizes
def diff_sections_total_size(base_binary_path, binary_path, readelf_path="readelf"):
"""
Diff the sections entries for two binaries.
:param base_binary_path: Path to base binary for diff.
:param binary_path: Path to binary to diff using.
:param readelf_path: Path to 'readelf' binary. Defaults to 'readelf'
:return: Ordered dictionary containing size of diff for all sections with a diff, the diff for the sum of the
sections in the 'Sections total' entry, and the diff for the on-disk file size in the 'File size' entry
"""
filesize = os.path.getsize(binary_path)
base_filesize = os.path.getsize(base_binary_path)
section_sizes = get_section_sizes(binary_path, readelf_path)
base_section_sizes = get_section_sizes(base_binary_path, readelf_path)
merged_keys = set(base_section_sizes.keys()) | set(section_sizes.keys())
base_total = 0
total = 0
results = collections.OrderedDict()
for section in sorted(merged_keys):
base_size = base_section_sizes[section] if section in base_section_sizes else 0
size = section_sizes[section] if section in section_sizes else 0
base_total += base_size
total += size
if size != base_size:
results[section] = size - base_size
results["Sections total"] = total - base_total
results["File size"] = filesize - base_filesize
return results
def main():
argparser = argparse.ArgumentParser(
description="Analyze sections in a binary using readelf. "
"Perform a diff between two binaries if --base_binary_path is specified."
)
argparser.add_argument("-r", "--readelf_path", type=str, help="Path to readelf executable.")
argparser.add_argument(
"-b",
"--base_binary_path",
type=os.path.realpath,
default=None,
help="Path to base binary if performing a diff between two binaries.",
)
argparser.add_argument(
"-w", "--write_to", type=str, default=None, help="Path to write output to. Writes to stdout if not provided."
)
argparser.add_argument("binary_path", type=os.path.realpath, help="Shared library to analyze.")
args = argparser.parse_args()
out_file = sys.stdout
if args.write_to:
out_file = open(args.write_to, "w") # noqa: SIM115
if args.base_binary_path:
diffs = diff_sections_total_size(args.base_binary_path, args.binary_path, args.readelf_path)
for key, value in diffs.items():
print(f"{key}:{value}", file=out_file)
else:
section_sizes = get_section_sizes(args.binary_path, args.readelf_path, out_file)
filesize = os.path.getsize(args.binary_path)
print(f"Sections total:{sum(section_sizes.values())}", file=out_file)
print(f"File size:{filesize}", file=out_file)
if args.write_to:
out_file.close()
if __name__ == "__main__":
main()
| 4,278 | 33.232 | 117 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/linux/ort_minimal/check_build_binary_size.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import os
import sys
# local helpers
import readelf_utils
def _check_binary_size(path, readelf, threshold, os_str, arch, build_config):
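    """Dump the binary's per-section sizes, write binary_size_data.txt next to it for the dashboard,
    and raise if the total of all sections exceeds the given threshold."""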
print(f"Checking binary size of {path} using {readelf}")
ondisk_size = os.path.getsize(path)
print("Section:size in bytes")
# call get_section_sizes to dump the section info to stdout
sections = readelf_utils.get_section_sizes(path, readelf, sys.stdout)
sections_total = sum(sections.values())
print(f"Sections total={sections_total} bytes")
print(f"File size={ondisk_size} bytes")
# Write the binary size to a file for uploading later
# On-disk binary size jumps in 4KB increments so we use the total of the sections as it has finer granularity.
    # Note that the sum of the sections is slightly larger than the on-disk size
# due to packing and/or alignment adjustments.
with open(os.path.join(os.path.dirname(path), "binary_size_data.txt"), "w") as file:
file.writelines(["os,arch,build_config,size\n", f"{os_str},{arch},{build_config},{sections_total}\n"])
if threshold is not None and sections_total > threshold:
raise RuntimeError(
"Sections total size for {} of {} exceeds threshold of {} by {}. On-disk size={}".format(
path, sections_total, threshold, sections_total - threshold, ondisk_size
)
)
def main():
argparser = argparse.ArgumentParser(
description="Check the binary size for provided path and "
"create a text file for upload to the performance dashboard."
)
# optional
argparser.add_argument("-t", "--threshold", type=int, help="Return error if binary size exceeds this threshold.")
argparser.add_argument("-r", "--readelf_path", type=str, default="readelf", help="Path to readelf executable.")
argparser.add_argument("--os", type=str, default="android", help="OS value to include in binary_size_data.txt")
argparser.add_argument(
"--arch", type=str, default="arm64-v8a", help="Arch value to include in binary_size_data.txt"
)
argparser.add_argument(
"--build_config",
type=str,
default="minimal-baseline",
help="Build_config value to include in binary_size_data.txt",
)
# file to analyze
argparser.add_argument("path", type=os.path.realpath, help="Path to binary to check.")
args = argparser.parse_args()
_check_binary_size(args.path, args.readelf_path, args.threshold, args.os, args.arch, args.build_config)
if __name__ == "__main__":
main()
| 2,681 | 37.314286 | 117 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/linux/ort_minimal/build_ort_and_check_binary_size.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import json
import pathlib
import subprocess
import sys
REPO_ROOT = pathlib.Path(__file__).resolve().parents[5]
def parse_args():
parser = argparse.ArgumentParser(description="Builds ORT and checks the binary size.")
parser.add_argument("build_check_binsize_config", type=pathlib.Path, help="Path to configuration file.")
parser.add_argument("--build_dir", type=pathlib.Path, required=True, help="Path to build directory.")
parser.add_argument("--threshold_size_in_bytes", type=int, help="Binary size limit in bytes.")
parser.add_argument(
"--with_debug_info", action="store_true", help="Whether to include debug information in the build."
)
return parser.parse_args()
def main():
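    """Read the binary size check config, build protoc and ONNX Runtime, then check the size of libonnxruntime.so."""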
args = parse_args()
with open(args.build_check_binsize_config) as config_file:
config = json.load(config_file)
config_type = config["type"]
os = config["os"]
arch = config["arch"]
build_params = config["build_params"]
build_config = "MinSizeRel" # could make this configurable if needed
# Build and install protoc
protobuf_installation_script = (
REPO_ROOT
/ "tools"
/ "ci_build"
/ "github"
/ "linux"
/ "docker"
/ "inference"
/ "x64"
/ "python"
/ "cpu"
/ "scripts"
/ "install_protobuf.sh"
)
subprocess.run(
[
str(protobuf_installation_script),
"-p",
str(pathlib.Path(args.build_dir) / "installed"),
"-d",
str(REPO_ROOT / "cmake" / "deps.txt"),
],
shell=False,
check=True,
)
# build ORT
build_command = (
[sys.executable, str(REPO_ROOT / "tools/ci_build/build.py"), *build_params]
+ (["--cmake_extra_defines", "ADD_DEBUG_INFO_TO_MINIMAL_BUILD=ON"] if args.with_debug_info else [])
# put the following options last so they don't get overridden by build_params
+ [
f"--build_dir={args.build_dir}",
f"--config={build_config}",
"--update",
"--build",
"--parallel",
"--test",
"--path_to_protoc_exe",
str(pathlib.Path(args.build_dir) / "installed" / "bin" / "protoc"),
]
)
subprocess.run(build_command, check=True)
# check binary size
check_binary_size_command = (
[
sys.executable,
str(REPO_ROOT / "tools/ci_build/github/linux/ort_minimal/check_build_binary_size.py"),
f"--os={os}",
f"--arch={arch}",
f"--build_config={config_type}",
]
+ ([f"--threshold={args.threshold_size_in_bytes}"] if args.threshold_size_in_bytes else [])
+ [str(args.build_dir / build_config / "libonnxruntime.so")]
)
subprocess.run(check_binary_size_command, check=True)
if __name__ == "__main__":
main()
| 3,047 | 29.178218 | 108 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/linux/ort_minimal/__init__.py | 0 | 0 | 0 | py |
|
onnxruntime | onnxruntime-main/tools/ci_build/github/android/build_aar_package.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import json
import os
import pathlib
import shutil
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..", "..", ".."))
BUILD_PY = os.path.join(REPO_DIR, "tools", "ci_build", "build.py")
JAVA_ROOT = os.path.join(REPO_DIR, "java")
DEFAULT_BUILD_VARIANT = "Full"
sys.path.insert(0, os.path.join(REPO_DIR, "tools", "python"))
from util import is_windows # noqa: E402
# We by default will build all 4 ABIs
DEFAULT_BUILD_ABIS = ["armeabi-v7a", "arm64-v8a", "x86", "x86_64"]
# Onnx Runtime native library is built against NDK API 21 by default
# It is possible to build from source for Android API levels below 21, but it is not guaranteed
DEFAULT_ANDROID_MIN_SDK_VER = 21
# Android API 24 is the default target API version for Android builds, based on Microsoft 1CS requirements
# It is possible to build from source using API level 21 and higher as the target SDK version
DEFAULT_ANDROID_TARGET_SDK_VER = 24
def _parse_build_settings(args):
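    """Load the AAR build settings JSON file and apply defaults for ABIs, SDK versions and build variant."""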
setting_file = args.build_settings_file.resolve()
if not setting_file.is_file():
raise FileNotFoundError(f"Build config file {setting_file} is not a file.")
with open(setting_file) as f:
build_settings_data = json.load(f)
build_settings = {}
if "build_abis" in build_settings_data:
build_settings["build_abis"] = build_settings_data["build_abis"]
else:
build_settings["build_abis"] = DEFAULT_BUILD_ABIS
build_params = []
if "build_params" in build_settings_data:
build_params += build_settings_data["build_params"]
else:
raise ValueError("build_params is required in the build config file")
if "android_min_sdk_version" in build_settings_data:
build_settings["android_min_sdk_version"] = build_settings_data["android_min_sdk_version"]
else:
build_settings["android_min_sdk_version"] = DEFAULT_ANDROID_MIN_SDK_VER
build_params += ["--android_api=" + str(build_settings["android_min_sdk_version"])]
if "android_target_sdk_version" in build_settings_data:
build_settings["android_target_sdk_version"] = build_settings_data["android_target_sdk_version"]
else:
build_settings["android_target_sdk_version"] = DEFAULT_ANDROID_TARGET_SDK_VER
if build_settings["android_min_sdk_version"] > build_settings["android_target_sdk_version"]:
raise ValueError(
"android_min_sdk_version {} cannot be larger than android_target_sdk_version {}".format(
build_settings["android_min_sdk_version"], build_settings["android_target_sdk_version"]
)
)
build_settings["build_params"] = build_params
build_settings["build_variant"] = build_settings_data.get("build_variant", DEFAULT_BUILD_VARIANT)
return build_settings
def _build_aar(args):
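    """Build the native libraries for each requested ABI and package them into an AAR with Gradle."""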
build_settings = _parse_build_settings(args)
build_dir = os.path.abspath(args.build_dir)
ops_config_path = os.path.abspath(args.include_ops_by_config) if args.include_ops_by_config else None
# Setup temp environment for building
temp_env = os.environ.copy()
temp_env["ANDROID_HOME"] = os.path.abspath(args.android_sdk_path)
temp_env["ANDROID_NDK_HOME"] = os.path.abspath(args.android_ndk_path)
# Temp dirs to hold building results
intermediates_dir = os.path.join(build_dir, "intermediates")
build_config = args.config
aar_dir = os.path.join(intermediates_dir, "aar", build_config)
jnilibs_dir = os.path.join(intermediates_dir, "jnilibs", build_config)
exe_dir = os.path.join(intermediates_dir, "executables", build_config)
base_build_command = [sys.executable, BUILD_PY] + build_settings["build_params"] + ["--config=" + build_config]
header_files_path = ""
# Build and install protoc
protobuf_installation_script = os.path.join(
REPO_DIR,
"tools",
"ci_build",
"github",
"linux",
"docker",
"inference",
"x64",
"python",
"cpu",
"scripts",
"install_protobuf.sh",
)
subprocess.run(
[
protobuf_installation_script,
"-p",
os.path.join(build_dir, "protobuf"),
"-d",
os.path.join(REPO_DIR, "cmake", "deps.txt"),
],
shell=False,
check=True,
)
# Build binary for each ABI, one by one
for abi in build_settings["build_abis"]:
abi_build_dir = os.path.join(intermediates_dir, abi)
abi_build_command = [
*base_build_command,
"--android_abi=" + abi,
"--build_dir=" + abi_build_dir,
"--path_to_protoc_exe",
os.path.join(build_dir, "protobuf", "bin", "protoc"),
]
if ops_config_path is not None:
abi_build_command += ["--include_ops_by_config=" + ops_config_path]
subprocess.run(abi_build_command, env=temp_env, shell=False, check=True, cwd=REPO_DIR)
# create symbolic links for libonnxruntime.so and libonnxruntime4j_jni.so
# to jnilibs/[abi] for later compiling the aar package
abi_jnilibs_dir = os.path.join(jnilibs_dir, abi)
os.makedirs(abi_jnilibs_dir, exist_ok=True)
for lib_name in ["libonnxruntime.so", "libonnxruntime4j_jni.so"]:
target_lib_name = os.path.join(abi_jnilibs_dir, lib_name)
            # If the symbolic link already exists, delete it first
            # Note that os.path.exists returns False for a broken symbolic link on Linux,
            # so also check with os.path.islink
if os.path.exists(target_lib_name) or os.path.islink(target_lib_name):
os.remove(target_lib_name)
os.symlink(os.path.join(abi_build_dir, build_config, lib_name), target_lib_name)
# copy executables for each abi, in case we want to publish those as well
abi_exe_dir = os.path.join(exe_dir, abi)
for exe_name in ["libonnxruntime.so", "onnxruntime_perf_test", "onnx_test_runner"]:
os.makedirs(abi_exe_dir, exist_ok=True)
target_exe_name = os.path.join(abi_exe_dir, exe_name)
shutil.copyfile(os.path.join(abi_build_dir, build_config, exe_name), target_exe_name)
# we only need to define the header files path once
if not header_files_path:
header_files_path = os.path.join(abi_build_dir, build_config, "android", "headers")
# The directory to publish final AAR
aar_publish_dir = os.path.join(build_dir, "aar_out", build_config)
os.makedirs(aar_publish_dir, exist_ok=True)
gradle_path = os.path.join(JAVA_ROOT, "gradlew" if not is_windows() else "gradlew.bat")
# get the common gradle command args
gradle_command = [
gradle_path,
"--no-daemon",
"-b=build-android.gradle",
"-c=settings-android.gradle",
"-DjniLibsDir=" + jnilibs_dir,
"-DbuildDir=" + aar_dir,
"-DheadersDir=" + header_files_path,
"-DpublishDir=" + aar_publish_dir,
"-DminSdkVer=" + str(build_settings["android_min_sdk_version"]),
"-DtargetSdkVer=" + str(build_settings["android_target_sdk_version"]),
"-DbuildVariant=" + str(build_settings["build_variant"]),
"-DENABLE_TRAINING_APIS=1"
if "--enable_training_apis" in build_settings["build_params"]
else "-DENABLE_TRAINING_APIS=0",
]
# clean, build, and publish to a local directory
subprocess.run([*gradle_command, "clean"], env=temp_env, shell=False, check=True, cwd=JAVA_ROOT)
subprocess.run([*gradle_command, "build"], env=temp_env, shell=False, check=True, cwd=JAVA_ROOT)
subprocess.run([*gradle_command, "publish"], env=temp_env, shell=False, check=True, cwd=JAVA_ROOT)
def parse_args():
parser = argparse.ArgumentParser(
os.path.basename(__file__),
description="""Create Android Archive (AAR) package for one or more Android ABI(s)
and building properties specified in the given build config file, see
tools/ci_build/github/android/default_mobile_aar_build_settings.json for details.
The output of the final AAR package can be found under [build_dir]/aar_out
""",
)
parser.add_argument(
"--android_sdk_path", type=str, default=os.environ.get("ANDROID_HOME", ""), help="Path to the Android SDK"
)
parser.add_argument(
"--android_ndk_path", type=str, default=os.environ.get("ANDROID_NDK_HOME", ""), help="Path to the Android NDK"
)
parser.add_argument(
"--build_dir",
type=str,
default=os.path.join(REPO_DIR, "build/android_aar"),
help="Provide the root directory for build output",
)
parser.add_argument(
"--include_ops_by_config",
type=str,
help="Include ops from config file. See /docs/Reduced_Operator_Kernel_build.md for more information.",
)
parser.add_argument(
"--config",
type=str,
default="Release",
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration to build.",
)
parser.add_argument(
"build_settings_file", type=pathlib.Path, help="Provide the file contains settings for building AAR"
)
return parser.parse_args()
def main():
args = parse_args()
# Android SDK and NDK path are required
if not args.android_sdk_path:
raise ValueError("android_sdk_path is required")
if not args.android_ndk_path:
raise ValueError("android_ndk_path is required")
_build_aar(args)
if __name__ == "__main__":
main()
| 9,767 | 37.456693 | 118 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/js/validate-npm-packages.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import sys
# This is a script to validate NPM packages.
# If the package version, publish tag, or filename does not fulfill the requirements, an error will be raised.
# arg.1 - Folder of extracted artifact "onnxruntime-node" for node.js binding
ort_node_pkg_dir = sys.argv[1]
# arg.2 - Folder of extracted artifact "onnxruntime-web" for web
ort_web_pkg_dir = sys.argv[2]
# arg.3 - Folder of extracted artifact "onnxruntime-react-native" for react native
ort_react_native_pkg_dir = sys.argv[3]
# arg.4 - source branch, eg. "refs/heads/master"
source_branch = sys.argv[4]
# arg.5 - NPM tag, eg. "", "dev", "latest", "rc"
tag = sys.argv[5]
# print out command line parameters
print("====== argv ======")
print("ort_node_pkg_dir:", ort_node_pkg_dir)
print("ort_web_pkg_dir:", ort_web_pkg_dir)
print("ort_react_native_pkg_dir:", ort_react_native_pkg_dir)
print("source_branch:", source_branch)
print("tag:", tag)
# check release flags from environment variables
RELEASE_NODE = os.environ.get("RELEASE_NODE", "") == "1"
RELEASE_WEB = os.environ.get("RELEASE_WEB", "") == "1"
RELEASE_REACT_NATIVE = os.environ.get("RELEASE_REACT_NATIVE", "") == "1"
# print out release flags
print("====== flags ======")
print("RELEASE_NODE:", RELEASE_NODE)
print("RELEASE_WEB:", RELEASE_WEB)
print("RELEASE_REACT_NATIVE:", RELEASE_REACT_NATIVE)
if not RELEASE_NODE and not RELEASE_WEB and not RELEASE_REACT_NATIVE:
raise Exception("not releasing any package. exiting.")
count_ort_node_common_tgz = 0
count_ort_node_tgz = 0
ort_node_common_ver = ""
ort_node_ver = ""
for file in os.listdir(ort_node_pkg_dir):
if file.startswith("onnxruntime-common-") and file.endswith(".tgz"):
ort_node_common_ver = file[19:-4]
count_ort_node_common_tgz += 1
if file.startswith("onnxruntime-node-") and file.endswith(".tgz"):
ort_node_ver = file[17:-4]
count_ort_node_tgz += 1
count_ort_web_common_tgz = 0
count_ort_web_tgz = 0
ort_web_common_ver = ""
ort_web_ver = ""
for file in os.listdir(ort_web_pkg_dir):
if file.startswith("onnxruntime-common-") and file.endswith(".tgz"):
ort_web_common_ver = file[19:-4]
count_ort_web_common_tgz += 1
if file.startswith("onnxruntime-web-") and file.endswith(".tgz"):
ort_web_ver = file[16:-4]
count_ort_web_tgz += 1
count_ort_react_native_common_tgz = 0
count_ort_react_native_tgz = 0
ort_react_native_common_ver = ""
ort_react_native_ver = ""
for file in os.listdir(ort_react_native_pkg_dir):
if file.startswith("onnxruntime-common-") and file.endswith(".tgz"):
ort_react_native_common_ver = file[19:-4]
count_ort_react_native_common_tgz += 1
if file.startswith("onnxruntime-react-native-") and file.endswith(".tgz"):
ort_react_native_ver = file[25:-4]
count_ort_react_native_tgz += 1
if count_ort_node_common_tgz >= 2:
raise Exception("expect at most 1 package file for onnxruntime-common in onnxruntime-node folder")
if count_ort_web_common_tgz >= 2:
raise Exception("expect at most 1 package file for onnxruntime-common in onnxruntime-web folder")
if count_ort_react_native_common_tgz >= 2:
raise Exception("expect at most 1 package file for onnxruntime-common in onnxruntime-react-native folder")
if RELEASE_NODE and RELEASE_WEB and count_ort_node_common_tgz != count_ort_web_common_tgz:
raise Exception("inconsistent package number for onnxruntime-common (onnxruntime-node/onnxruntime-web)")
if RELEASE_NODE and RELEASE_REACT_NATIVE and count_ort_node_common_tgz != count_ort_react_native_common_tgz:
raise Exception("inconsistent package number for onnxruntime-common (onnxruntime-node/onnxruntime-react-native)")
if RELEASE_WEB and RELEASE_REACT_NATIVE and count_ort_web_common_tgz != count_ort_react_native_common_tgz:
raise Exception("inconsistent package number for onnxruntime-common (onnxruntime-web/onnxruntime-react-native)")
if RELEASE_NODE and RELEASE_WEB and ort_node_common_ver != ort_web_common_ver:
raise Exception("inconsistent version number for onnxruntime-common (onnxruntime-node/onnxruntime-web)")
if RELEASE_NODE and RELEASE_REACT_NATIVE and ort_node_common_ver != ort_react_native_common_ver:
raise Exception("inconsistent version number for onnxruntime-common (onnxruntime-node/onnxruntime-react-native)")
if RELEASE_WEB and RELEASE_REACT_NATIVE and ort_web_common_ver != ort_react_native_common_ver:
raise Exception("inconsistent version number for onnxruntime-common (onnxruntime-web/onnxruntime-react-native)")
ort_common_ver = (
ort_node_common_ver if RELEASE_NODE else (ort_web_common_ver if RELEASE_WEB else ort_react_native_common_ver)
)
ort_common_from = "" if not ort_common_ver else ("node" if RELEASE_NODE else ("web" if RELEASE_WEB else "react-native"))
print("====== output environment variables ======")
print(f"##vso[task.setvariable variable=ORT_COMMON_FROM]{ort_common_from}")
if tag == "latest" or tag == "" or tag == "rc": # noqa: PLC1901
if not RELEASE_NODE or not RELEASE_WEB or not RELEASE_REACT_NATIVE:
raise Exception("@latest or @rc build must release all packages (node, web, react-native)")
if count_ort_node_common_tgz != 1:
raise Exception("expect one package file for onnxruntime-common for release build")
if count_ort_node_tgz != 1:
raise Exception("expect one package file for onnxruntime-node")
if count_ort_web_tgz != 1:
raise Exception("expect one package file for onnxruntime-web")
if count_ort_react_native_tgz != 1:
raise Exception("expect one package file for onnxruntime-react-native")
if RELEASE_NODE and RELEASE_WEB and ort_node_ver != ort_web_ver:
raise Exception("version number is different for onnxruntime-node and onnxruntime-web")
if RELEASE_NODE and RELEASE_REACT_NATIVE and ort_node_ver != ort_react_native_ver:
raise Exception("version number is different for onnxruntime-node and onnxruntime-react-native")
if RELEASE_WEB and RELEASE_REACT_NATIVE and ort_web_ver != ort_react_native_ver:
raise Exception("version number is different for onnxruntime-web and onnxruntime-react-native")
print("====== validated versions ======")
print(f"source_branch={source_branch}")
print(f"tag={tag}")
print(f"ort_common_ver={ort_common_ver}")
print(f"ort_node_ver={ort_node_ver}")
print(f"ort_web_ver={ort_web_ver}")
print(f"ort_react_native_ver={ort_react_native_ver}")
if tag == "latest" or tag == "": # noqa: PLC1901
print("Publishing @latest ...")
if not source_branch.startswith("refs/heads/rel-"):
raise Exception('@latest build must publish from source branch "refs/heads/rel-*"')
if (
"-" in ort_common_ver.replace("-rev", "")
or "-" in ort_web_ver.replace("-rev", "")
or "-" in ort_react_native_ver.replace("-rev", "")
):
raise Exception('@latest build version cannot contain "-" (unless -rev)')
if tag == "rc":
print("Publishing @rc ...")
if not source_branch.startswith("refs/heads/rel-"):
raise Exception('@rc build must publish from source branch "refs/heads/rel-*"')
if "-rc" not in ort_web_ver:
raise Exception('@rc build version should contain "-rc"')
if "-rc" not in ort_react_native_ver:
raise Exception('@rc build version should contain "-rc"')
if (
"-" not in ort_common_ver.replace("-rev", "")
and "-" not in ort_web_ver.replace("-rev", "")
and "-" not in ort_react_native_ver.replace("-rev", "")
and "+" not in ort_common_ver.replace("-rev", "")
and "+" not in ort_web_ver.replace("-rev", "")
and "+" not in ort_react_native_ver.replace("-rev", "")
):
if tag != "latest" and tag != "": # noqa: PLC1901
raise Exception("default version without decorator can only be published in @latest tag")
| 7,872 | 45.585799 | 120 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/windows/post_code_coverage_to_dashboard.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# command line arguments
# --report_url=<string>
# --report_file=<string, local file path, TXT/JSON file>
# --commit_hash=<string, full git commit hash>
import argparse
import datetime
import json
import sys
# ingest from dataframe
import pandas
from azure.kusto.data import DataFormat, KustoConnectionStringBuilder
from azure.kusto.ingest import IngestionProperties, QueuedIngestClient, ReportLevel
def parse_arguments():
parser = argparse.ArgumentParser(description="ONNXRuntime test coverage report uploader for dashboard")
parser.add_argument("--report_url", type=str, help="URL to the LLVM json report")
parser.add_argument("--report_file", type=str, help="Path to the local JSON/TXT report", required=True)
parser.add_argument("--commit_hash", type=str, help="Full Git commit hash", required=True)
parser.add_argument("--branch", type=str, help="Source code branch")
parser.add_argument("--os", type=str, help="Build configuration:os")
parser.add_argument("--arch", type=str, help="Build configuration:arch")
parser.add_argument("--build_config", type=str, help="Build configuration: build variants")
return parser.parse_args()
def parse_txt_report(report_file):
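    """Extract total/covered line counts and the coverage ratio from the TOTAL row of a text coverage report."""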
data = {}
with open(report_file) as report:
for line in reversed(report.readlines()):
if "TOTAL" in line:
fields = line.strip().split()
data["lines_valid"] = int(fields[1])
data["lines_covered"] = int(fields[2])
data["coverage"] = float(fields[3].strip("%")) / 100
break
return data
def parse_json_report(report_file):
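    """Extract line coverage totals from the 'totals' section of an LLVM JSON coverage report."""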
result = {}
with open(report_file) as json_file:
data = json.load(json_file)
linestat = data["data"][0]["totals"]["lines"]
result["coverage"] = float(linestat["percent"] / 100.0)
result["lines_covered"] = int(linestat["covered"])
result["lines_valid"] = int(linestat["count"])
return result
def write_to_db(coverage_data, args):
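    """Ingest the coverage numbers into the test_coverage table of the dashboard Kusto database."""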
# connect to database
cluster = "https://ingest-onnxruntimedashboarddb.southcentralus.kusto.windows.net"
kcsb = KustoConnectionStringBuilder.with_az_cli_authentication(cluster)
# The authentication method will be taken from the chosen KustoConnectionStringBuilder.
client = QueuedIngestClient(kcsb)
fields = [
"UploadTime",
"CommitId",
"Coverage",
"LinesCovered",
"TotalLines",
"OS",
"Arch",
"BuildConfig",
"ReportURL",
"Branch",
]
now_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
rows = [
[
now_str,
args.commit_hash,
coverage_data["coverage"],
coverage_data["lines_covered"],
coverage_data["lines_valid"],
args.os.lower(),
args.arch.lower(),
args.build_config.lower(),
args.report_url.lower(),
args.branch.lower(),
]
]
ingestion_props = IngestionProperties(
database="powerbi",
table="test_coverage",
data_format=DataFormat.CSV,
report_level=ReportLevel.FailuresAndSuccesses,
)
df = pandas.DataFrame(data=rows, columns=fields)
client.ingest_from_dataframe(df, ingestion_properties=ingestion_props)
if __name__ == "__main__":
try:
args = parse_arguments()
if args.report_file.endswith(".json"):
coverage_data = parse_json_report(args.report_file)
elif args.report_file.endswith(".txt"):
coverage_data = parse_txt_report(args.report_file)
else:
raise ValueError("Only report extensions txt or json are accepted")
write_to_db(coverage_data, args)
except BaseException as e:
print(str(e))
sys.exit(1)
| 3,952 | 33.077586 | 107 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/windows/post_binary_sizes_to_dashboard.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import datetime
import os
import sys
# ingest from dataframe
import pandas
from azure.kusto.data import DataFormat, KustoConnectionStringBuilder
from azure.kusto.ingest import IngestionProperties, QueuedIngestClient, ReportLevel
def parse_arguments():
parser = argparse.ArgumentParser(description="ONNXRuntime binary size uploader for dashboard")
parser.add_argument("--commit_hash", help="Full Git commit hash")
parser.add_argument(
"--build_project",
default="Lotus",
choices=["Lotus", "onnxruntime"],
help="Lotus or onnxruntime build project, to construct the build URL",
)
parser.add_argument("--build_id", help="Build Id")
parser.add_argument("--size_data_file", help="Path to file that contains the binary size data")
parser.add_argument(
"--ignore_db_error", action="store_true", help="Ignore database errors while executing this script"
)
return parser.parse_args()
# Assumes size_data_file is a csv file with a header line, containing binary sizes and other attributes
# CSV fields are:
# os,arch,build_config,size
# No empty line or space between fields expected
def get_binary_sizes(size_data_file):
binary_size = []
with open(size_data_file) as f:
line = f.readline()
headers = line.strip().split(",")
while line:
line = f.readline()
if not line:
break
linedata = line.strip().split(",")
tablerow = {}
for i in range(0, len(headers)):
if headers[i] == "size":
tablerow[headers[i]] = int(linedata[i])
else:
tablerow[headers[i]] = linedata[i]
binary_size.append(tablerow)
return binary_size
def write_to_db(binary_size_data, args):
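    """Ingest the binary size rows into the binary_size table of the dashboard Kusto database."""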
# connect to database
cluster = "https://ingest-onnxruntimedashboarddb.southcentralus.kusto.windows.net"
kcsb = KustoConnectionStringBuilder.with_az_cli_authentication(cluster)
# The authentication method will be taken from the chosen KustoConnectionStringBuilder.
client = QueuedIngestClient(kcsb)
fields = ["build_time", "build_id", "build_project", "commit_id", "os", "arch", "build_config", "size", "Branch"]
now_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
branch_name = os.environ.get("BUILD_SOURCEBRANCHNAME", "main")
rows = []
for row in binary_size_data:
rows.append(
[
now_str,
args.build_id,
args.build_project,
args.commit_hash,
row["os"],
row["arch"],
row["build_config"],
row["size"],
branch_name.lower(),
]
)
ingestion_props = IngestionProperties(
database="powerbi",
table="binary_size",
data_format=DataFormat.CSV,
report_level=ReportLevel.FailuresAndSuccesses,
)
df = pandas.DataFrame(data=rows, columns=fields)
client.ingest_from_dataframe(df, ingestion_properties=ingestion_props)
if __name__ == "__main__":
args = parse_arguments()
binary_size_data = get_binary_sizes(args.size_data_file)
try:
write_to_db(binary_size_data, args)
except BaseException as e:
print(str(e))
        # If there is a DB connection error and '--ignore_db_error' was specified,
        # we let the script exit cleanly in order not to fail the pipeline
if not args.ignore_db_error:
sys.exit(1)
| 3,690 | 33.820755 | 117 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/apple/test_ios_packages.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import contextlib
import os
import pathlib
import shutil
import subprocess
import tempfile
from c.assemble_c_pod_package import assemble_c_pod_package
from package_assembly_utils import PackageVariant, gen_file_from_template, get_ort_version
SCRIPT_PATH = pathlib.Path(__file__).resolve(strict=True)
REPO_DIR = SCRIPT_PATH.parents[4]
def _test_ios_packages(args):
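    """Assemble a local CocoaPods pod from the built framework and use it to set up (and, unless
    --prepare_test_project_only is given, run) the ios_package_test project."""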
# check if CocoaPods is installed
if shutil.which("pod") is None:
if args.fail_if_cocoapods_missing:
raise ValueError("CocoaPods is required for this test")
else:
print("CocoaPods is not installed, ignore this test")
return
    # Now we need to create a zip file that contains the framework and the podspec file; both of these files
    # should be under the c_framework_dir
c_framework_dir = args.c_framework_dir.resolve()
if not c_framework_dir.is_dir():
raise FileNotFoundError(f"c_framework_dir {c_framework_dir} is not a folder.")
has_framework = (c_framework_dir / "onnxruntime.framework").exists()
has_xcframework = (c_framework_dir / "onnxruntime.xcframework").exists()
if not has_framework and not has_xcframework:
raise FileNotFoundError(f"{c_framework_dir} does not have onnxruntime.framework/xcframework")
if has_framework and has_xcframework:
raise ValueError("Cannot proceed when both onnxruntime.framework and onnxruntime.xcframework exist")
framework_name = "onnxruntime.framework" if has_framework else "onnxruntime.xcframework"
# create a temp folder
with contextlib.ExitStack() as context_stack:
if args.test_project_stage_dir is None:
stage_dir = pathlib.Path(context_stack.enter_context(tempfile.TemporaryDirectory())).resolve()
else:
# If we specify the stage dir, then use it to create test project
stage_dir = args.test_project_stage_dir.resolve()
if os.path.exists(stage_dir):
shutil.rmtree(stage_dir)
os.makedirs(stage_dir)
# assemble the test project here
target_proj_path = stage_dir / "ios_package_test"
# copy the test project source files to target_proj_path
test_proj_path = pathlib.Path(REPO_DIR, "onnxruntime/test/platform/ios/ios_package_test")
shutil.copytree(test_proj_path, target_proj_path)
# assemble local pod files here
local_pods_dir = stage_dir / "local_pods"
        # We will only publish the xcframework. However, assembling the xcframework is a post-processing step
        # that cannot be done by CMake for now. See https://gitlab.kitware.com/cmake/cmake/-/issues/21752
        # For a single sysroot and arch built by build.py or CMake, we can only generate a framework,
        # and we still need a way to test it. framework_dir and public_headers_dir have different values when
        # testing a framework and an xcframework.
framework_dir = args.c_framework_dir / framework_name
public_headers_dir = framework_dir / "Headers" if has_framework else args.c_framework_dir / "Headers"
pod_name, podspec = assemble_c_pod_package(
staging_dir=local_pods_dir,
pod_version=get_ort_version(),
framework_info_file=args.framework_info_file,
public_headers_dir=public_headers_dir,
framework_dir=framework_dir,
package_variant=PackageVariant[args.variant],
)
# move podspec out to target_proj_path first
podspec = shutil.move(podspec, target_proj_path / podspec.name)
        # create a zip file that contains the framework
        zip_file_path = local_pods_dir / f"{pod_name}.zip"
        # shutil.make_archive requires the target file as a full path without the extension
shutil.make_archive(zip_file_path.with_suffix(""), "zip", root_dir=local_pods_dir)
# update the podspec to point to the local framework zip file
with open(podspec) as file:
file_data = file.read()
file_data = file_data.replace("file:///http_source_placeholder", f"file:///{zip_file_path}")
with open(podspec, "w") as file:
file.write(file_data)
# generate Podfile to point to pod
gen_file_from_template(
target_proj_path / "Podfile.template",
target_proj_path / "Podfile",
{"C_POD_NAME": pod_name, "C_POD_PODSPEC": f"./{podspec.name}"},
)
# clean the Cocoapods cache first, in case the same pod was cached in previous runs
subprocess.run(["pod", "cache", "clean", "--all"], shell=False, check=True, cwd=target_proj_path)
# install pods
subprocess.run(["pod", "install"], shell=False, check=True, cwd=target_proj_path)
# run the tests
if not args.prepare_test_project_only:
simulator_device_name = subprocess.check_output(
["bash", str(REPO_DIR / "tools" / "ci_build" / "github" / "apple" / "get_simulator_device_name.sh")],
text=True,
).strip()
subprocess.run(
[
"xcrun",
"xcodebuild",
"test",
"-workspace",
"./ios_package_test.xcworkspace",
"-scheme",
"ios_package_test",
"-destination",
f"platform=iOS Simulator,OS=latest,name={simulator_device_name}",
],
shell=False,
check=True,
cwd=target_proj_path,
)
def parse_args():
parser = argparse.ArgumentParser(
os.path.basename(__file__), description="Test iOS framework using CocoaPods package."
)
parser.add_argument(
"--fail_if_cocoapods_missing",
action="store_true",
help="This script will fail if CocoaPods is not installed, "
"will not throw error unless fail_if_cocoapod_missing is set.",
)
parser.add_argument(
"--framework_info_file",
type=pathlib.Path,
required=True,
help="Path to the framework_info.json file containing additional values for the podspec. "
"This file should be generated by CMake in the build directory.",
)
parser.add_argument(
"--c_framework_dir", type=pathlib.Path, required=True, help="Provide the parent directory for C/C++ framework"
)
parser.add_argument(
"--variant",
choices=PackageVariant.all_variant_names(),
default=PackageVariant.Test.name,
help="Pod package variant.",
)
parser.add_argument(
"--test_project_stage_dir",
type=pathlib.Path,
help="The stage dir for the test project, if not specified, will use a temporary path",
)
parser.add_argument(
"--prepare_test_project_only",
action="store_true",
help="Prepare the test project only, without running the tests",
)
return parser.parse_args()
def main():
args = parse_args()
_test_ios_packages(args)
if __name__ == "__main__":
main()
| 7,240 | 36.518135 | 118 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/apple/build_and_assemble_ios_pods.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import logging
import pathlib
import shutil
import sys
import tempfile
from c.assemble_c_pod_package import assemble_c_pod_package
from objectivec.assemble_objc_pod_package import assemble_objc_pod_package
from package_assembly_utils import PackageVariant, get_ort_version
SCRIPT_PATH = pathlib.Path(__file__).resolve()
SCRIPT_DIR = SCRIPT_PATH.parent
REPO_DIR = SCRIPT_PATH.parents[4]
logging.basicConfig(format="%(asctime)s %(name)s [%(levelname)s] - %(message)s", level=logging.DEBUG)
log = logging.getLogger(SCRIPT_PATH.stem)
def parse_args():
parser = argparse.ArgumentParser(
description="Builds an iOS framework and uses it to assemble iOS pod package files.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--build-dir",
type=pathlib.Path,
default=REPO_DIR / "build" / "ios_framework",
help="The build directory. This will contain the iOS framework build output.",
)
parser.add_argument(
"--staging-dir",
type=pathlib.Path,
default=REPO_DIR / "build" / "ios_pod_staging",
help="The staging directory. This will contain the iOS pod package files. "
"The pod package files do not have dependencies on files in the build directory.",
)
parser.add_argument(
"--pod-version",
default=f"{get_ort_version()}-local",
help="The version string of the pod. The same version is used for all pods.",
)
parser.add_argument(
"--variant",
choices=PackageVariant.release_variant_names(),
default=PackageVariant.Mobile.name,
help="Pod package variant.",
)
parser.add_argument("--test", action="store_true", help="Run tests on the framework and pod package files.")
build_framework_group = parser.add_argument_group(
title="iOS framework build arguments",
description="See the corresponding arguments in build_ios_framework.py for details.",
)
build_framework_group.add_argument("--include-ops-by-config")
build_framework_group.add_argument(
"--build-settings-file", required=True, help="The positional argument of build_ios_framework.py."
)
build_framework_group.add_argument(
"-b",
"--build-ios-framework-arg",
action="append",
dest="build_ios_framework_extra_args",
default=[],
help="Pass an argument through to build_ios_framework.py. This may be specified multiple times.",
)
args = parser.parse_args()
return args
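# Illustrative invocation of this script (a sketch, not a verified command line): the
# settings file below is the default config referenced elsewhere in this directory, and the
# pass-through protoc path is an assumption used only as an example of the -b option.
#   python tools/ci_build/github/apple/build_and_assemble_ios_pods.py \
#       --build-settings-file tools/ci_build/github/apple/default_mobile_ios_framework_build_settings.json \
#       --variant Mobile \
#       -b=--path_to_protoc_exe -b=/path/to/protoc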
def run(arg_list, cwd=None):
import os
import shlex
import subprocess
log.info(
"Running subprocess in '{}'\n {}".format(cwd or os.getcwd(), " ".join([shlex.quote(arg) for arg in arg_list]))
)
return subprocess.run(arg_list, check=True, cwd=cwd)
def main():
args = parse_args()
build_dir = args.build_dir.resolve()
staging_dir = args.staging_dir.resolve()
# build framework
package_variant = PackageVariant[args.variant]
framework_info_file = build_dir / "framework_info.json"
log.info("Building iOS framework.")
build_ios_framework_args = [
sys.executable,
str(SCRIPT_DIR / "build_ios_framework.py"),
*args.build_ios_framework_extra_args,
]
if args.include_ops_by_config is not None:
build_ios_framework_args += ["--include_ops_by_config", args.include_ops_by_config]
build_ios_framework_args += ["--build_dir", str(build_dir), args.build_settings_file]
run(build_ios_framework_args)
if args.test:
test_ios_packages_args = [
sys.executable,
str(SCRIPT_DIR / "test_ios_packages.py"),
"--fail_if_cocoapods_missing",
"--framework_info_file",
str(framework_info_file),
"--c_framework_dir",
str(build_dir / "framework_out"),
"--variant",
package_variant.name,
]
run(test_ios_packages_args)
# assemble pods and then move them to their target locations (staging_dir/<pod_name>)
staging_dir.mkdir(parents=True, exist_ok=True)
with tempfile.TemporaryDirectory(dir=staging_dir) as pod_assembly_dir_name:
pod_assembly_dir = pathlib.Path(pod_assembly_dir_name)
log.info("Assembling C/C++ pod.")
c_pod_staging_dir = pod_assembly_dir / "c_pod"
c_pod_name, c_pod_podspec = assemble_c_pod_package(
staging_dir=c_pod_staging_dir,
pod_version=args.pod_version,
framework_info_file=framework_info_file,
framework_dir=build_dir / "framework_out" / "onnxruntime.xcframework",
public_headers_dir=build_dir / "framework_out" / "Headers",
package_variant=package_variant,
)
if args.test:
test_c_pod_args = ["pod", "lib", "lint", "--verbose"]
run(test_c_pod_args, cwd=c_pod_staging_dir)
log.info("Assembling Objective-C pod.")
objc_pod_staging_dir = pod_assembly_dir / "objc_pod"
objc_pod_name, objc_pod_podspec = assemble_objc_pod_package(
staging_dir=objc_pod_staging_dir,
pod_version=args.pod_version,
framework_info_file=framework_info_file,
package_variant=package_variant,
)
if args.test:
test_objc_pod_args = ["pod", "lib", "lint", "--verbose", f"--include-podspecs={c_pod_podspec}"]
run(test_objc_pod_args, cwd=objc_pod_staging_dir)
def move_dir(src, dst):
if dst.is_dir():
shutil.rmtree(dst)
shutil.move(src, dst)
move_dir(c_pod_staging_dir, staging_dir / c_pod_name)
move_dir(objc_pod_staging_dir, staging_dir / objc_pod_name)
log.info(f"Successfully assembled iOS pods at '{staging_dir}'.")
if __name__ == "__main__":
main()
| 6,017 | 31.354839 | 119 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/apple/build_ios_framework.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import glob
import json
import os
import pathlib
import shutil
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..", "..", ".."))
BUILD_PY = os.path.join(REPO_DIR, "tools", "ci_build", "build.py")
# By default, build for the following sysroots and archs (3 archs in total)
DEFAULT_BUILD_OSX_ARCHS = {
"iphoneos": ["arm64"],
"iphonesimulator": ["arm64", "x86_64"],
}
def _parse_build_settings(args):
with open(args.build_settings_file.resolve()) as f:
build_settings_data = json.load(f)
build_settings = {}
build_settings["build_osx_archs"] = build_settings_data.get("build_osx_archs", DEFAULT_BUILD_OSX_ARCHS)
build_params = []
if "build_params" in build_settings_data:
build_params += build_settings_data["build_params"]
else:
raise ValueError("build_params is required in the build config file")
build_settings["build_params"] = build_params
return build_settings
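# Shape of the build settings JSON expected by _parse_build_settings() above (an
# illustrative sketch only -- the "build_params" value here is a placeholder assumption,
# not taken from the repo's default settings files):
# {
#   "build_osx_archs": {"iphoneos": ["arm64"], "iphonesimulator": ["arm64", "x86_64"]},
#   "build_params": ["<arguments passed through to tools/ci_build/build.py>"]
# }
# "build_params" is required; "build_osx_archs" falls back to DEFAULT_BUILD_OSX_ARCHS.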
# Build fat framework for all archs of a single sysroot
# For example, arm64 and x86_64 for iphonesimulator
def _build_for_ios_sysroot(
build_config, intermediates_dir, base_build_command, sysroot, archs, build_dynamic_framework
):
# paths of the onnxruntime libraries for different archs
ort_libs = []
info_plist_path = ""
# Build binary for each arch, one by one
for current_arch in archs:
build_dir_current_arch = os.path.join(intermediates_dir, sysroot + "_" + current_arch)
build_command = [
*base_build_command,
"--ios_sysroot=" + sysroot,
"--osx_arch=" + current_arch,
"--build_dir=" + build_dir_current_arch,
]
# the actual build process for current arch
subprocess.run(build_command, shell=False, check=True, cwd=REPO_DIR)
# get the compiled lib path
framework_dir = os.path.join(
build_dir_current_arch,
build_config,
build_config + "-" + sysroot,
"onnxruntime.framework"
if build_dynamic_framework
else os.path.join("static_framework", "onnxruntime.framework"),
)
ort_libs.append(os.path.join(framework_dir, "onnxruntime"))
# We only need to copy Info.plist, framework_info.json, and headers once since they are the same
if not info_plist_path:
info_plist_path = os.path.join(build_dir_current_arch, build_config, "Info.plist")
framework_info_path = os.path.join(build_dir_current_arch, build_config, "framework_info.json")
headers = glob.glob(os.path.join(framework_dir, "Headers", "*.h"))
# manually create the fat framework
framework_dir = os.path.join(intermediates_dir, "frameworks", sysroot, "onnxruntime.framework")
# remove the existing framework if any
if os.path.exists(framework_dir):
shutil.rmtree(framework_dir)
pathlib.Path(framework_dir).mkdir(parents=True, exist_ok=True)
# copy the Info.plist, framework_info.json, and header files
shutil.copy(info_plist_path, framework_dir)
shutil.copy(framework_info_path, os.path.dirname(framework_dir))
header_dir = os.path.join(framework_dir, "Headers")
pathlib.Path(header_dir).mkdir(parents=True, exist_ok=True)
for _header in headers:
shutil.copy(_header, header_dir)
# use lipo to create a fat ort library
lipo_command = ["lipo", "-create"]
lipo_command += ort_libs
lipo_command += ["-output", os.path.join(framework_dir, "onnxruntime")]
subprocess.run(lipo_command, shell=False, check=True)
return framework_dir
def _build_package(args):
build_settings = _parse_build_settings(args)
build_dir = os.path.abspath(args.build_dir)
    # Temp dir to hold intermediate build results
intermediates_dir = os.path.join(build_dir, "intermediates")
build_config = args.config
base_build_command = [sys.executable, BUILD_PY] + build_settings["build_params"] + ["--config=" + build_config]
if args.include_ops_by_config is not None:
base_build_command += ["--include_ops_by_config=" + str(args.include_ops_by_config.resolve())]
if args.path_to_protoc_exe is not None:
base_build_command += ["--path_to_protoc_exe=" + str(args.path_to_protoc_exe.resolve())]
# build framework for individual sysroot
framework_dirs = []
framework_info_path = ""
public_headers_path = ""
for sysroot in build_settings["build_osx_archs"]:
framework_dir = _build_for_ios_sysroot(
build_config,
intermediates_dir,
base_build_command,
sysroot,
build_settings["build_osx_archs"][sysroot],
args.build_dynamic_framework,
)
framework_dirs.append(framework_dir)
        # framework_info.json and headers are the same for each sysroot, so pick them from one
if not framework_info_path:
framework_info_path = os.path.join(os.path.dirname(framework_dir), "framework_info.json")
public_headers_path = os.path.join(os.path.dirname(framework_dir), "onnxruntime.framework", "Headers")
    # create the folder for the xcframework and copy the LICENSE, public headers, and framework_info.json
xcframework_dir = os.path.join(build_dir, "framework_out")
pathlib.Path(xcframework_dir).mkdir(parents=True, exist_ok=True)
shutil.copy(os.path.join(REPO_DIR, "LICENSE"), xcframework_dir)
shutil.copytree(public_headers_path, os.path.join(xcframework_dir, "Headers"), dirs_exist_ok=True)
shutil.copy(framework_info_path, build_dir)
# remove existing xcframework if any
xcframework_path = os.path.join(xcframework_dir, "onnxruntime.xcframework")
if os.path.exists(xcframework_path):
shutil.rmtree(xcframework_path)
# Assemble the final xcframework
build_xcframework_cmd = ["xcrun", "xcodebuild", "-create-xcframework", "-output", xcframework_path]
for framework_dir in framework_dirs:
build_xcframework_cmd.extend(["-framework", framework_dir])
subprocess.run(build_xcframework_cmd, shell=False, check=True, cwd=REPO_DIR)
def parse_args():
parser = argparse.ArgumentParser(
os.path.basename(__file__),
description="""Create iOS framework and podspec for one or more osx_archs (xcframework)
and building properties specified in the given build config file, see
tools/ci_build/github/apple/default_mobile_ios_framework_build_settings.json for details.
The output of the final xcframework and podspec can be found under [build_dir]/framework_out.
Please note, this building script will only work on macOS.
""",
)
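    # Example invocation (an illustrative sketch; the settings file shown is the default
    # config referenced in the description above, and the build directory is arbitrary):
    #   python tools/ci_build/github/apple/build_ios_framework.py \
    #       --build_dir build/iOS_framework \
    #       tools/ci_build/github/apple/default_mobile_ios_framework_build_settings.json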
parser.add_argument(
"--build_dir",
type=pathlib.Path,
default=os.path.join(REPO_DIR, "build/iOS_framework"),
help="Provide the root directory for build output",
)
parser.add_argument(
"--include_ops_by_config",
type=pathlib.Path,
help="Include ops from config file. See /docs/Reduced_Operator_Kernel_build.md for more information.",
)
parser.add_argument(
"--config",
type=str,
default="Release",
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration to build.",
)
parser.add_argument(
"--build_dynamic_framework",
action="store_true",
help="Build Dynamic Framework (default is build static framework).",
)
parser.add_argument(
"build_settings_file", type=pathlib.Path, help="Provide the file contains settings for building iOS framework"
)
parser.add_argument("--path_to_protoc_exe", type=pathlib.Path, help="Path to protoc exe.")
args = parser.parse_args()
if not args.build_settings_file.resolve().is_file():
raise FileNotFoundError(f"Build config file {args.build_settings_file.resolve()} is not a file.")
if args.include_ops_by_config is not None:
include_ops_by_config_file = args.include_ops_by_config.resolve()
if not include_ops_by_config_file.is_file():
raise FileNotFoundError(f"Include ops config file {include_ops_by_config_file} is not a file.")
return args
def main():
args = parse_args()
_build_package(args)
if __name__ == "__main__":
main()
| 8,434 | 36.65625 | 118 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/apple/__init__.py | 0 | 0 | 0 | py |
|
onnxruntime | onnxruntime-main/tools/ci_build/github/apple/package_assembly_utils.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import enum
import json
import os
import pathlib
import re
import shutil
from typing import Dict, List
_script_dir = pathlib.Path(__file__).parent.resolve(strict=True)
repo_root = _script_dir.parents[3]
class PackageVariant(enum.Enum):
Full = 0 # full ORT build with all opsets, ops, and types
Mobile = 1 # minimal ORT build with reduced ops
Training = 2 # full ORT build with all opsets, ops, and types, plus training APIs
Test = -1 # for testing purposes only
@classmethod
def release_variant_names(cls):
return [v.name for v in cls if v.value >= 0]
@classmethod
def all_variant_names(cls):
return [v.name for v in cls]
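# For reference: with the values above, release_variant_names() returns
# ["Full", "Mobile", "Training"] (Test is excluded by its negative value), and
# all_variant_names() additionally includes "Test".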
_template_variable_pattern = re.compile(r"@(\w+)@") # match "@var@"
def gen_file_from_template(
template_file: pathlib.Path, output_file: pathlib.Path, variable_substitutions: Dict[str, str], strict: bool = True
):
"""
Generates a file from a template file.
The template file may contain template variables that will be substituted
with the provided values in the generated output file.
In the template file, template variable names are delimited by "@"'s,
e.g., "@var@".
:param template_file The template file path.
:param output_file The generated output file path.
:param variable_substitutions The mapping from template variable name to value.
:param strict Whether to require the set of template variable names in the file and the keys of
`variable_substitutions` to be equal.
"""
with open(template_file) as template:
content = template.read()
variables_in_file = set()
def replace_template_variable(match):
variable_name = match.group(1)
variables_in_file.add(variable_name)
return variable_substitutions.get(variable_name, match.group(0))
content = _template_variable_pattern.sub(replace_template_variable, content)
if strict and variables_in_file != variable_substitutions.keys():
variables_in_substitutions = set(variable_substitutions.keys())
raise ValueError(
f"Template file variables and substitution variables do not match. "
f"Only in template file: {sorted(variables_in_file - variables_in_substitutions)}. "
f"Only in substitutions: {sorted(variables_in_substitutions - variables_in_file)}."
)
with open(output_file, mode="w") as output:
output.write(content)
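# Example usage of gen_file_from_template (an illustrative sketch; the file names and
# template variables below are hypothetical, not files from this repo). With strict=True,
# the template must contain exactly the variables being substituted:
#   template.txt contains: pod "@NAME@", "@VERSION@"
#   gen_file_from_template(
#       pathlib.Path("template.txt"),
#       pathlib.Path("out.txt"),
#       {"NAME": "onnxruntime-c", "VERSION": "1.0.0"},
#   )
#   out.txt then contains: pod "onnxruntime-c", "1.0.0"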
def filter_files(all_file_patterns: List[str], excluded_file_patterns: List[str]):
"""
    Filters file paths based on inclusion and exclusion patterns.
    :param all_file_patterns The list of file path patterns to filter.
    :param excluded_file_patterns The list of exclusion patterns.
    :return The filtered list of file paths.
"""
# get all files matching the patterns in all_file_patterns
all_files = [str(path.relative_to(repo_root)) for pattern in all_file_patterns for path in repo_root.glob(pattern)]
# get all files matching the patterns in excluded_file_patterns
exclude_files = [
str(path.relative_to(repo_root)) for pattern in excluded_file_patterns for path in repo_root.glob(pattern)
]
# return the difference
return list(set(all_files) - set(exclude_files))
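# Example (an illustrative sketch; the patterns are hypothetical):
#   filter_files(["objectivec/*.mm"], ["objectivec/ort_checkpoint.mm"])
# yields every repo-root-relative path matching "objectivec/*.mm" except
# "objectivec/ort_checkpoint.mm", in no particular order since the result is a set difference.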
def copy_repo_relative_to_dir(patterns: List[str], dest_dir: pathlib.Path):
"""
Copies file paths relative to the repo root to a directory.
The given paths or path patterns are relative to the repo root, and the
repo root-relative intermediate directory structure is maintained.
:param patterns The paths or path patterns relative to the repo root.
:param dest_dir The destination directory.
"""
paths = [path for pattern in patterns for path in repo_root.glob(pattern)]
for path in paths:
repo_relative_path = path.relative_to(repo_root)
dst_path = dest_dir / repo_relative_path
os.makedirs(dst_path.parent, exist_ok=True)
shutil.copy(path, dst_path)
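# Example (an illustrative sketch with hypothetical paths): copying ["LICENSE", "objectivec/*.h"]
# with dest_dir=/tmp/stage produces /tmp/stage/LICENSE and /tmp/stage/objectivec/<name>.h,
# preserving the repo-root-relative directory structure under dest_dir.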
def load_json_config(json_config_file: pathlib.Path):
"""
Loads configuration info from a JSON file.
:param json_config_file The JSON configuration file path.
:return The configuration info values.
"""
with open(json_config_file) as config:
return json.load(config)
def get_ort_version():
"""
Gets the ONNX Runtime version string from the repo.
:return The ONNX Runtime version string.
"""
with open(repo_root / "VERSION_NUMBER") as version_file:
return version_file.read().strip()
| 4,642 | 34.442748 | 119 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/apple/objectivec/assemble_objc_pod_package.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import pathlib
import sys
_script_dir = pathlib.Path(__file__).parent.resolve(strict=True)
sys.path.append(str(_script_dir.parent))
from c.assemble_c_pod_package import get_pod_config_file as get_c_pod_config_file # noqa: E402
from package_assembly_utils import ( # noqa: E402
PackageVariant,
copy_repo_relative_to_dir,
filter_files,
gen_file_from_template,
load_json_config,
)
# these variables contain paths or path patterns that are relative to the repo root
# the license file
license_file = "LICENSE"
# include directories for compiling the pod itself
include_dirs = [
"objectivec",
]
all_objc_files = {
"source_files": [
"objectivec/include/*.h",
"objectivec/*.h",
"objectivec/*.m",
"objectivec/*.mm",
],
"public_header_files": [
"objectivec/include/*.h",
],
"test_source_files": [
"objectivec/test/*.h",
"objectivec/test/*.m",
"objectivec/test/*.mm",
],
"test_resource_files": [
"objectivec/test/testdata/*.ort",
"onnxruntime/test/testdata/training_api/*",
],
}
training_only_objc_files = {
"source_files": [
"objectivec/include/onnxruntime_training.h",
"objectivec/include/ort_checkpoint.h",
"objectivec/include/ort_training_session.h",
"objectivec/ort_checkpoint.mm",
"objectivec/ort_checkpoint_internal.h",
"objectivec/ort_training_session_internal.h",
"objectivec/ort_training_session.mm",
],
"public_header_files": [
"objectivec/include/ort_checkpoint.h",
"objectivec/include/ort_training_session.h",
"objectivec/include/onnxruntime_training.h",
],
"test_source_files": [
"objectivec/test/ort_training_session_test.mm",
"objectivec/test/ort_checkpoint_test.mm",
"objectivec/test/ort_training_utils_test.mm",
],
"test_resource_files": [
"onnxruntime/test/testdata/training_api/*",
],
}
def get_pod_files(package_variant: PackageVariant):
"""
Gets the source and header files for the given package variant.
"""
if package_variant == PackageVariant.Training:
return all_objc_files
else:
        # return files that are in all_objc_files but not in training_only_objc_files
filtered_pod_files = {}
for key in all_objc_files:
filtered_pod_files[key] = filter_files(all_objc_files[key], training_only_objc_files[key])
return filtered_pod_files
def get_pod_config_file(package_variant: PackageVariant):
"""
Gets the pod configuration file path for the given package variant.
"""
if package_variant == PackageVariant.Full:
return _script_dir / "onnxruntime-objc.config.json"
elif package_variant == PackageVariant.Mobile:
return _script_dir / "onnxruntime-mobile-objc.config.json"
elif package_variant == PackageVariant.Training:
return _script_dir / "onnxruntime-training-objc.config.json"
else:
raise ValueError(f"Unhandled package variant: {package_variant}")
def assemble_objc_pod_package(
staging_dir: pathlib.Path, pod_version: str, framework_info_file: pathlib.Path, package_variant: PackageVariant
):
"""
Assembles the files for the Objective-C pod package in a staging directory.
:param staging_dir Path to the staging directory for the Objective-C pod files.
:param pod_version Objective-C pod version.
:param framework_info_file Path to the framework_info.json file containing additional values for the podspec.
:param package_variant The pod package variant.
:return Tuple of (package name, path to the podspec file).
"""
staging_dir = staging_dir.resolve()
framework_info_file = framework_info_file.resolve(strict=True)
framework_info = load_json_config(framework_info_file)
pod_config = load_json_config(get_pod_config_file(package_variant))
c_pod_config = load_json_config(get_c_pod_config_file(package_variant))
pod_name = pod_config["name"]
print(f"Assembling files in staging directory: {staging_dir}")
if staging_dir.exists():
print("Warning: staging directory already exists", file=sys.stderr)
pod_files = get_pod_files(package_variant)
# copy the necessary files to the staging directory
copy_repo_relative_to_dir(
[license_file, *pod_files["source_files"], *pod_files["test_source_files"], *pod_files["test_resource_files"]],
staging_dir,
)
# generate the podspec file from the template
def path_patterns_as_variable_value(patterns: list[str]):
return ", ".join([f'"{pattern}"' for pattern in patterns])
variable_substitutions = {
"C_POD_NAME": c_pod_config["name"],
"DESCRIPTION": pod_config["description"],
"INCLUDE_DIR_LIST": path_patterns_as_variable_value(include_dirs),
"IOS_DEPLOYMENT_TARGET": framework_info["IOS_DEPLOYMENT_TARGET"],
"LICENSE_FILE": license_file,
"NAME": pod_name,
"PUBLIC_HEADER_FILE_LIST": path_patterns_as_variable_value(pod_files["public_header_files"]),
"SOURCE_FILE_LIST": path_patterns_as_variable_value(pod_files["source_files"]),
"SUMMARY": pod_config["summary"],
"TEST_RESOURCE_FILE_LIST": path_patterns_as_variable_value(pod_files["test_resource_files"]),
"TEST_SOURCE_FILE_LIST": path_patterns_as_variable_value(pod_files["test_source_files"]),
"VERSION": pod_version,
}
podspec_template = _script_dir / "objc.podspec.template"
podspec = staging_dir / f"{pod_name}.podspec"
gen_file_from_template(podspec_template, podspec, variable_substitutions)
return pod_name, podspec
def parse_args():
parser = argparse.ArgumentParser(
description="""
Assembles the files for the Objective-C pod package in a staging directory.
This directory can be validated (e.g., with `pod lib lint`) and then zipped to create a package for release.
"""
)
parser.add_argument(
"--staging-dir",
type=pathlib.Path,
default=pathlib.Path("./onnxruntime-mobile-objc-staging"),
help="Path to the staging directory for the Objective-C pod files.",
)
parser.add_argument("--pod-version", required=True, help="Objective-C pod version.")
parser.add_argument(
"--framework-info-file",
type=pathlib.Path,
required=True,
help="Path to the framework_info.json file containing additional values for the podspec. "
"This file should be generated by CMake in the build directory.",
)
parser.add_argument(
"--variant", choices=PackageVariant.release_variant_names(), required=True, help="Pod package variant."
)
return parser.parse_args()
def main():
args = parse_args()
assemble_objc_pod_package(
staging_dir=args.staging_dir,
pod_version=args.pod_version,
framework_info_file=args.framework_info_file,
package_variant=PackageVariant[args.variant],
)
return 0
if __name__ == "__main__":
sys.exit(main())
| 7,242 | 33.004695 | 119 | py |
onnxruntime | onnxruntime-main/tools/ci_build/github/apple/objectivec/__init__.py | 0 | 0 | 0 | py |