# Reflect-master/__init__.py  (empty file)
# Reflect-master/util/text_util.py
from collections import Counter
import csv
import subprocess
from util import inflect
import pandas as pd
from statsmodels.stats.proportion import proportion_confint
infl_eng = inflect.engine()
dependency_fields = ['sentence', 'orig_sentence', 'pos_sentence',
'subj', 'verb', 'subj_pos', 'has_rel', 'has_nsubj',
'verb_pos', 'subj_index', 'verb_index', 'n_intervening',
'last_intervening', 'n_diff_intervening', 'distance',
'max_depth', 'all_nouns', 'nouns_up_to_verb']
def deps_to_tsv(deps, outfile):
writer = csv.writer(open(outfile, 'w'), delimiter='\t')
writer.writerow(dependency_fields)
for dep in deps:
writer.writerow([dep[key] for key in dependency_fields])
def deps_from_tsv(infile, limit=None):
res = []
for i, d in enumerate(csv.DictReader(open(infile), delimiter='\t')):
if limit is not None and i >= limit:
break
res.append({x: int(y) if y.isdigit() else y for x, y in d.items()})
return res
def zread(fname):
p = subprocess.Popen(['gunzip', '-c', fname], stdout=subprocess.PIPE)
for line in p.stdout:
yield line.decode('utf-8')  # gunzip's stdout is bytes; decode so callers work with text
p.wait()
def tokenize_blanks(fh):
sent = []
for line in fh:
line = line.strip().split()
if not line:
if sent:
yield sent
sent = []
else:
sent.append(line)
yield sent
def create_freq_dict(infile, outfile, minfreq=50):
d = Counter()
for i, line in enumerate(zread(infile)):
stripped = line.strip()
if stripped:
s = stripped.split()
d[s[1], s[3]] += 1
if i % 1000000 == 0:
print(i)
outfile = open(outfile, 'w')  # Python 3: the file() builtin no longer exists
for (w, pos), count in d.items():  # Python 3: dict.iteritems() -> dict.items()
if count > minfreq:
outfile.write('%s\t%s\t%d\n' % (w, pos, count))
def confint(row):
n_errors = int(row['errorprob'] * row['count'])
return proportion_confint(n_errors, row['count'])
def add_confints(df):
df['minconf'] = df.apply(lambda row: confint(row)[0], axis=1)
df['maxconf'] = df.apply(lambda row: confint(row)[1], axis=1)
def get_grouping(df, grouping_vars):
funcs = {'correct': {'accuracy': 'mean', 'count': 'count'},
'distance': {'mean_distance': 'mean'}}
x = df.groupby(grouping_vars).aggregate(funcs)
x.columns = x.columns.droplevel()
x = x.reset_index()
x['errorprob'] = 1 - x['accuracy']
add_confints(x)
return x
def gen_inflect_from_vocab(vocab_file, freq_threshold=1000):
vbp = {}
vbz = {}
nn = {}
nns = {}
from_pos = {'NNS': nns, 'NN': nn, 'VBP': vbp, 'VBZ': vbz}
for line in open(vocab_file):
if line.startswith(' '): # empty string token
continue
word, pos, count = line.strip().split()
count = int(count)
if len(word) > 1 and pos in from_pos and count >= freq_threshold:
from_pos[pos][word] = count
verb_infl = {'VBP': 'VBZ', 'VBZ': 'VBP'}
for word, count in vbz.items():
candidate = infl_eng.plural_verb(word)
if candidate in vbp:
verb_infl[candidate] = word
verb_infl[word] = candidate
noun_infl = {'NN': 'NNS', 'NNS': 'NN'}
for word, count in nn.items():
candidate = infl_eng.plural_noun(word)
if candidate in nns:
noun_infl[candidate] = word
noun_infl[word] = candidate
return verb_infl, noun_infl
def annotate_relpron(df):
pd.options.mode.chained_assignment = None
def f(x):
blacklist = set(['NNP', 'PRP'])
relprons = set(['WDT', 'WP', 'WRB', 'WP$'])
vi = x['verb_index'] - 1
words_in_dep = x['orig_sentence'].split()[x['subj_index']:vi]
pos_in_dep = x['pos_sentence'].split()[x['subj_index']:vi]
first_is_that = words_in_dep[:1] == ['that']
return (bool(blacklist & set(pos_in_dep)),
bool(relprons & set(pos_in_dep[:2])) | first_is_that,
bool(relprons & set(pos_in_dep)) | first_is_that)
df['blacklisted'], df['has_early_relpron'], df['has_relpron'] = \
zip(*df.apply(f, axis=1))
df['has_early_relpron'] = True
def g(x):
if x['has_rel'] and x['has_relpron'] and x['has_early_relpron']:
return 'With relativizer'
elif x['has_rel'] and not x['has_relpron']:
return 'Without relativizer'
elif not x['has_rel']:
if x['has_relpron']:
return 'Error'
else:
return 'No relative clause'
else:
return 'Error'
df['condition'] = df.apply(g, axis=1)
return df
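# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the repo): a minimal round trip through
# deps_to_tsv / deps_from_tsv, assuming a record that carries every key listed in
# `dependency_fields`. All field values below are made up for demonstration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    example_dep = {field: 0 for field in dependency_fields}
    example_dep.update({'sentence': 'the dogs run',
                        'orig_sentence': 'the dogs run',
                        'pos_sentence': 'DT NNS VBP',
                        'subj': 'dogs', 'verb': 'run',
                        'subj_pos': 'NNS', 'verb_pos': 'VBP'})
    deps_to_tsv([example_dep], '/tmp/example_deps.tsv')    # writes a header row plus one data row
    restored = deps_from_tsv('/tmp/example_deps.tsv')      # digit strings come back as ints
    assert restored[0]['sentence'] == 'the dogs run'
    assert restored[0]['n_intervening'] == 0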
# Reflect-master/util/constants.py
pad = '<pad>'
unk = '<unk>'
bos = '<bos>'
eos = '<eos>'
pad_idx = 0
unk_idx = 1
bos_idx = 2
eos_idx = 3
all = [pad, unk, bos, eos]
# Reflect-master/util/model_configs.py
class ModelConfig(object):
def __init__(self,
hidden_dim=1024,
embedding_dim=512,
input_dim=None,
output_dim=None,
depth=1,
hidden_dropout_rate=0.5,
input_dropout_rate=0.2,
initializer_range=None,
filters=[32],
maxout_size=[32],
kernel_size=[(3,3)],
pool_size=[(2,2)],
proj_depth=1,
routings=3,
fc_dim=[],
**kwargs):
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.input_dim = input_dim
self.output_dim = output_dim
self.depth = depth
self.proj_depth = proj_depth
self.fc_dim = fc_dim
self.hidden_dropout_rate = hidden_dropout_rate
self.input_dropout_rate = input_dropout_rate
self.initializer_range = initializer_range
self.kernel_size = kernel_size
self.filters = filters
self.maxout_size = maxout_size
self.pool_size = pool_size
self.output_hidden_states = kwargs.pop('output_hidden_states', False)
self.output_embeddings = kwargs.pop('output_embeddings', False)
self.routings = routings
class GPT2Config(object):
"""Configuration class to store the configuration of a `GPT2Model`.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `GPT2Model`.
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
embedding_dim: Dimensionality of the embeddings and hidden states.
depth: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
layer_norm_epsilon: epsilon to use in the layer norm layers
resid_pdrop: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
def __init__(
self,
vocab_size,
n_positions=1024,
n_ctx=1024,
embedding_dim=512,
depth=6,
n_head=8,
resid_pdrop=0.1,
embd_pdrop=0.2,
attn_pdrop=0.2,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
num_labels=1,
summary_type='cls_index',
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
**kwargs
):
"""Constructs GPT2Config.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `GPT2Model`.
n_positions: Number of positional embeddings.
n_ctx: Size of the causal mask (usually same as n_positions).
embedding_dim: Dimensionality of the embeddings and hidden states.
depth: Number of hidden layers in the Transformer encoder.
n_head: Number of attention heads for each attention layer in
the Transformer encoder.
layer_norm_epsilon: epsilon to use in the layer norm layers
resid_pdrop: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attn_pdrop: The dropout ratio for the attention
probabilities.
embd_pdrop: The dropout ratio for the embeddings.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.n_ctx = n_ctx
self.n_positions = n_positions
self.embedding_dim = embedding_dim
self.depth = depth
self.n_head = n_head
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.num_labels = num_labels
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
self.output_attentions = kwargs.pop('output_attentions', False)
self.output_hidden_states = kwargs.pop('output_hidden_states', False)
self.output_embeddings = kwargs.pop('output_embeddings', False)
@property
def max_position_embeddings(self):
return self.n_positions
@property
def hidden_size(self):
return self.embedding_dim
@property
def num_attention_heads(self):
return self.n_head
@property
def num_hidden_layers(self):
return self.depth
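# Illustrative usage (not from the repo; the numbers are assumptions): GPT2Config takes the
# vocabulary size plus the keyword overrides used by the presets further down, and exposes
# HuggingFace-style names through the properties above, e.g.:
#
#     cfg = GPT2Config(vocab_size=10000, embedding_dim=128, depth=4, n_head=8)
#     cfg.hidden_size              # 128  (alias for embedding_dim)
#     cfg.num_hidden_layers        # 4    (alias for depth)
#     cfg.num_attention_heads      # 8    (alias for n_head)
#     cfg.max_position_embeddings  # 1024 (alias for n_positions, the default)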
class CapsConfig(object):
def __init__(self,
output_dim=10,
A=32,
B=32,
C=32,
D=32,
epsilon=1e-9,
l2=0.0000002,
final_lambda=0.01,
iter_routing=2):
self.output_dim = output_dim
self.A = A
self.B = B
self.C = C
self.D = D
self.epsilon = epsilon
self.l2 = l2
self.final_lambda = final_lambda
self.iter_routing = iter_routing
class ResnetConfig(object):
def __init__(self, **kwargs):
self.output_dim = kwargs.get('output_dim', 1)
self.hidden_dim = kwargs.get('hidden_dim', 512)
self.pool_size = kwargs.get('pool_size', 3)
self.filters = kwargs.get('filters', [32, 32, 32, 32])
self.kernel_size = kwargs.get('kernel_size', [(3, 3), (3, 3), (3, 3), (3, 3)])
self.hidden_dropout_rate = kwargs.get('hidden_dropout_rate', 0.2)
self.input_dropout_rate = kwargs.get('input_dropout_rate', 0.0)
self.num_res_net_blocks = kwargs.get('num_res_net_blocks', 2)
small_gpt = {
'embedding_dim': 128,
'resid_pdrop': 0.1,
'embd_pdrop': 0.1,
'attn_pdrop': 0.1
}
small_gpt_v3 = {
'embedding_dim': 128,
'resid_pdrop': 0.1,
'embd_pdrop': 0.2,
'attn_pdrop': 0.2
}
small_gpt_v4 = {
'embedding_dim': 128,
'resid_pdrop': 0.2,
'embd_pdrop': 0.2,
'attn_pdrop': 0.2
}
small_gpt_v5 = {
'embedding_dim': 128,
'resid_pdrop': 0.3,
'embd_pdrop': 0.2,
'attn_pdrop': 0.3
}
small_gpt_v6 = {
'embedding_dim': 128,
'resid_pdrop': 0.3,
'embd_pdrop': 0.2,
'attn_pdrop': 0.5,
'initializer_range': 0.05
}
small_gpt_v7 = {
'embedding_dim': 128,
'resid_pdrop': 0.5,
'embd_pdrop': 0.2,
'attn_pdrop': 0.5,
'initializer_range': 0.05
}
small_gpt_v8 = {
'embedding_dim': 128,
'resid_pdrop': 0.4,
'embd_pdrop': 0.2,
'attn_pdrop': 0.5,
'initializer_range': 0.01
}
small_gpt_v9 = {
'embedding_dim': 128,
'resid_pdrop': 0.4,
'embd_pdrop': 0.2,
'attn_pdrop': 0.6,
'initializer_range': 0.05
}
small_ugpt_v9 = {
'embedding_dim': 256,
'resid_pdrop': 0.4,
'embd_pdrop': 0.2,
'attn_pdrop': 0.6,
'initializer_range': 0.05
}
short_gpt_v9 = {
'embedding_dim': 128,
'resid_pdrop': 0.4,
'embd_pdrop': 0.2,
'attn_pdrop': 0.6,
'initializer_range': 0.05,
'depth': 4
}
big_gpt_v2 = {
'embedding_dim': 256,
'resid_pdrop': 0.2,
'embd_pdrop': 0.2,
'attn_pdrop': 0.2
}
big_gpt_v3 = {
'embedding_dim': 256,
'resid_pdrop': 0.3,
'embd_pdrop': 0.2,
'attn_pdrop': 0.3
}
big_gpt_v4 = {
'embedding_dim': 256,
'resid_pdrop': 0.3,
'embd_pdrop': 0.2,
'attn_pdrop': 0.3,
'initializer_range': 0.05
}
big_gpt_v5 = {
'embedding_dim': 256,
'resid_pdrop': 0.2,
'embd_pdrop': 0.2,
'attn_pdrop': 0.3,
'initializer_range': 0.05
}
big_gpt_v6 = {
'embedding_dim': 256,
'resid_pdrop': 0.2,
'embd_pdrop': 0.2,
'attn_pdrop': 0.4,
'initializer_range': 0.05
}
very_big_gpt = {
'embedding_dim': 512
}
very_big_gpt_v2 = {
'embedding_dim': 512,
'resid_pdrop': 0.2,
'embd_pdrop': 0.2,
'attn_pdrop': 0.2
}
very_big_gpt_v3 = {
'embedding_dim': 512,
'resid_pdrop': 0.3,
'embd_pdrop': 0.3,
'attn_pdrop': 0.3
}
very_big_gpt_v4 = {
'embedding_dim': 512,
'resid_pdrop': 0.3,
'embd_pdrop': 0.3,
'attn_pdrop': 0.4
}
very_big_gpt_v5 = {
'embedding_dim': 512,
'resid_pdrop': 0.3,
'embd_pdrop': 0.3,
'attn_pdrop': 0.4,
'initializer_range': 0.05
}
very_big_gpt_v5 = {
'embedding_dim': 512,
'resid_pdrop': 0.3,
'embd_pdrop': 0.5,
'attn_pdrop': 0.4
}
very_big_gpt_v6 = {
'embedding_dim': 512,
'resid_pdrop': 0.5,
'embd_pdrop': 0.5,
'attn_pdrop': 0.5
}
very_big_gpt_v7 = {
'embedding_dim': 512,
'resid_pdrop': 0.4,
'embd_pdrop': 0.5,
'attn_pdrop': 0.4
}
very_big_gpt_v8 = {
'embedding_dim': 512,
'resid_pdrop': 0.5,
'embd_pdrop': 0.2,
'attn_pdrop': 0.5
}
very_big_gpt_v9 = {
'embedding_dim': 512,
'resid_pdrop': 0.4,
'embd_pdrop': 0.2,
'attn_pdrop': 0.5,
'initializer_range': 0.05
}
very_big_gpt_v10 = {
'embedding_dim': 512,
'resid_pdrop': 0.4,
'embd_pdrop': 0.2,
'attn_pdrop': 0.6,
'initializer_range': 0.05
}
big_gpt_v9 = {
'embedding_dim': 256,
'resid_pdrop': 0.4,
'embd_pdrop': 0.2,
'attn_pdrop': 0.5,
'initializer_range': 0.05
}
small_gpt_v2 = {
'embedding_dim': 128,
'resid_pdrop': 0.1,
'embd_pdrop': 0.0,
'attn_pdrop': 0.1
}
small_lstm = {
'hidden_dim': 256,
'embedding_dim': 256,
'depth': 2,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
small_lstm_v2 = {
'hidden_dim': 256,
'embedding_dim': 128,
'depth': 2,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
small_lstm_v3 = {
'hidden_dim': 256,
'embedding_dim': 128,
'depth': 2,
'hidden_dropout_rate': 0.8,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
small_lstm_v4 = {
'hidden_dim': 256,
'embedding_dim': 256,
'depth': 2,
'hidden_dropout_rate': 0.8,
'input_dropout_rate': 0.2,
'initializer_range': 0.1
}
small_lstm_v5 = {
'hidden_dim': 256,
'embedding_dim': 256,
'depth': 2,
'hidden_dropout_rate': 0.6,
'input_dropout_rate': 0.2,
'initializer_range': 0.1
}
small_lstm_v6 = {
'hidden_dim': 256,
'embedding_dim': 256,
'depth': 2,
'hidden_dropout_rate': 0.8,
'input_dropout_rate': 0.25,
'initializer_range': 0.1
}
tiny_lstm = {
'hidden_dim': 128,
'embedding_dim': 128,
'depth': 2,
'hidden_dropout_rate': 0.1,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
tiny_lstm_v2 = {
'hidden_dim': 128,
'embedding_dim': 128,
'depth': 2,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
big_lstm = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.25,
'input_dropout_rate': 0.2,
}
big_lstm_v2 = {
'hidden_dim': 512,
'embedding_dim': 128,
'depth': 2,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.2,
}
bigger_lstm_v2 = {
'hidden_dim': 728,
'embedding_dim': 256,
'depth': 2,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.2,
}
bigger_lstm_v4 = {
'hidden_dim': 728,
'embedding_dim': 256,
'depth': 2,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.2,
}
bigger_lstm_v3 = {
'hidden_dim': 728,
'embedding_dim': 256,
'depth': 2,
'hidden_dropout_rate': 0.4,
'input_dropout_rate': 0.2,
}
lstm_simple = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.0,
'input_dropout_rate': 0.0,
}
lstm_drop1 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.1,
'input_dropout_rate': 0.1,
}
lstm_drop2 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.2,
}
lstm_drop12 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.1,
'input_dropout_rate': 0.2,
}
lstm_drop3 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.3,
}
lstm_drop30 = {
'hidden_dim': 512,
'embedding_dim': 256,
'depth': 2,
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0,
}
lstm_drop31_v2 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.2,
}
biglstm_drop31_v2 = {
'hidden_dim': 1024,
'embedding_dim': 1024,
'depth': 2,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.2,
}
lstm_drop31_v3 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.6,
'input_dropout_rate': 0.2,
}
biglstm_drop31_v3 = {
'hidden_dim': 1024,
'embedding_dim': 1024,
'depth': 2,
'hidden_dropout_rate': 0.6,
'input_dropout_rate': 0.25,
}
lstm_drop31_v4 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.4,
'input_dropout_rate': 0.2,
}
lstm_drop31_v5 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 3,
'hidden_dropout_rate': 0.4,
'input_dropout_rate': 0.2,
}
lstm3_drop30 = {
'hidden_dim': 512,
'embedding_dim': 256,
'depth': 3,
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0,
}
lstm_drop30_v2 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
lstm_drop30_v3 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
lstm_drop30_v4 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.0,
'initializer_range': 0.05
}
lstm3_drop60 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.6,
'input_dropout_rate': 0.0,
'initializer_range': 0.05
}
lstm3_drop20 = {
'hidden_dim': 128,
'embedding_dim': 256,
'depth': 3,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
lstm3_big_drop2 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 3,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.2,
'initializer_range': 0.1
}
big_lstm_drop5 = {
'hidden_dim': 1024,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.2,
}
lstm2_big_drop20 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
lstm2_drop20 = {
'hidden_dim': 256,
'embedding_dim': 256,
'depth': 2,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'initializer_range': 0.1
}
lstm3_drop50 = {
'hidden_dim': 256,
'embedding_dim': 256,
'depth': 3,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.1,
'initializer_range': 0.05
}
lstm3_drop41 = {
'hidden_dim': 256,
'embedding_dim': 256,
'depth': 3,
'hidden_dropout_rate': 0.4,
'input_dropout_rate': 0.1,
'initializer_range': 0.05
}
lstm2_big_drop20_v2 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'initializer_range': 0.01
}
lstm2_big_drop30_v2 = {
'hidden_dim': 512,
'embedding_dim': 512,
'depth': 2,
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0,
'initializer_range': 0.01
}
ff_mnist = {'hidden_dim': 256,
'depth': 3,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.2}
ff_mnist1 = {'hidden_dim': [512, 256, 128],
'depth': 3,
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0}
ff_mnist2 = {'hidden_dim': [1024, 256, 64],
'depth': 3,
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0}
ff_mnist3 = {'hidden_dim': [512, 128, 64],
'depth': 3,
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0}
ff_mnist4 = {'hidden_dim': [512, 128, 32],
'depth': 3,
'hidden_dropout_rate': 0.1,
'input_dropout_rate': 0.0}
ff_mnist5 = {'hidden_dim': [512, 512, 64, 32],
'depth': 4,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0}
ff_svhn = {'hidden_dim': 512,
'depth': 3,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.0}
ff_svhn2 = {'hidden_dim': 512,
'depth': 3,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0}
ff_svhn3 = {'hidden_dim': 256,
'depth': 3,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0}
ff_svhn4 = {'hidden_dim': 128,
'depth': 3,
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0}
vcnn_mnist1 = {
'fc_dim': [128],
'depth': 3,
'proj_depth': 1,
'filters': [128, 64, 32],
'maxout_size': [128, 64, 32],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(1,1), (2,2), (2,2)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.1,
}
vcnn_mnist2 = {
'fc_dim': [128, 128],
'depth': 2,
'proj_depth': 2,
'filters': [64, 64, 64],
'maxout_size': [64, 64, 64],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(2,2), (2,2), (2,2)],
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.1}
vcnn_mnist3 = {
'fc_dim': [],
'depth': 3,
'proj_depth': 0,
'filters': [128, 64, 32],
'maxout_size': [128, 64, 32],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(1,1), (2,2), (2,2)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.1}
vcnn_mnist4 = {
'fc_dim': [128],
'depth': 3,
'proj_depth': 1,
'filters': [128, 64, 32],
'maxout_size': [128, 64, 32],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(1,1), (2,2), (2,2)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0}
vcnn_mnist5 = {
'fc_dim': [],
'depth': 3,
'proj_depth': 0,
'filters': [128, 64, 64],
'maxout_size': [128, 64, 16],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(1,1), (2,2), (2,2)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0}
vcnn_mnist6 = {
'fc_dim': [],
'depth': 3,
'proj_depth': 0,
'filters': [128, 64, 64],
'maxout_size': [128, 64, 8],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(1,1), (2,2), (2,2)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0}
vcnn_mnist7 = {
'fc_dim': [],
'depth': 3,
'proj_depth': 0,
'filters': [128, 64, 64],
'maxout_size': [128, 64, 16],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(1,1), (2,2), (2,2)],
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0}
vcnn_mnist8 = {
'fc_dim': [],
'depth': 3,
'proj_depth': 0,
'filters': [128, 64, 64],
'maxout_size': [128, 64, 8],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(1,1), (2,2), (2,2)],
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0}
vcnn_lenet5 = {'hidden_dim': [128, 128],
'depth': 2,
'proj_depth': 2,
'filters': [16, 16],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(2,2), (2,2), (2,2)],
'hidden_dropout_rate': 0.8,
'input_dropout_rate': 0.25}
vcnn_svhn1 = {'hidden_dim': [256, 256],
'depth': 3,
'proj_depth': 2,
'filters': [32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(2,2), (2,2), (2,2)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0}
vcnn_svhn2 = {'hidden_dim': [256, 256],
'depth': 3,
'proj_depth': 2,
'filters': [32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(2,2), (2,2), (2,2)],
'hidden_dropout_rate': 0.1,
'input_dropout_rate': 0.0}
vcnn_svhn3 = {'hidden_dim': [256, 256],
'depth': 3,
'proj_depth': 2,
'filters': [32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(2,2), (2,2), (2,2)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.1}
vcnn_svhn4 = {'hidden_dim': [256, 256],
'depth': 3,
'proj_depth': 2,
'filters': [32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(2,2), (2,2), (2,2)],
'hidden_dropout_rate': 0.1,
'input_dropout_rate': 0.1}
vcnn_svhn5 = {'hidden_dim': [512, 512],
'depth': 3,
'proj_depth': 2,
'filters': [32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3)],
'pool_size': [(2,2), (2,2), (2,2)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0}
rsnt_svhn1 = {'hidden_dim': 512,
'pool_size': 3,
'filters': [32, 32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3), (3,3)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'num_res_net_blocks': 2}
rsnt_svhn2 = {'hidden_dim': 512,
'pool_size': 3,
'filters': [32, 32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3), (3,3)],
'hidden_dropout_rate': 0.25,
'input_dropout_rate': 0.1,
'num_res_net_blocks': 2}
rsnt_svhn3 = {'hidden_dim': 512,
'pool_size': 3,
'filters': [32, 32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3), (3,3)],
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.2,
'num_res_net_blocks': 3}
rsnt_svhn4 = {'hidden_dim': 512,
'pool_size': 3,
'filters': [32, 32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3), (3,3)],
'hidden_dropout_rate': 0.4,
'input_dropout_rate': 0.1,
'num_res_net_blocks': 3}
rsnt_svhn5 = {'hidden_dim': 128,
'pool_size': 3,
'filters': [32, 32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3), (3,3)],
'hidden_dropout_rate': 0.3,
'input_dropout_rate': 0.0,
'num_res_net_blocks': 3}
rsnt_mnist1 = {'hidden_dim': 512,
'pool_size': 3,
'filters': [32, 32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3), (3,3)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'num_res_net_blocks': 2}
rsnt_mnist2 = {'hidden_dim': 512,
'pool_size': 3,
'filters': [32, 32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3), (3,3)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'num_res_net_blocks': 3}
rsnt_mnist3 = {'hidden_dim': 128,
'pool_size': 3,
'filters': [32, 32, 32, 32],
'kernel_size': [(3,3), (3,3), (3,3), (3,3)],
'hidden_dropout_rate': 0.2,
'input_dropout_rate': 0.0,
'num_res_net_blocks': 3}
caps_base = {'hidden_dim': 16,
'routing': 3,
'filters': 10,
'hidden_dropout_rate': 0.5,
'input_dropout_rate': 0.2}
mat_caps_base = {'A':32,
'B':32,
'C':32,
'D':32,
'epsilon':1e-9,
'l2':0.0000002,
'final_lambda':0.01,
'iter_routing':2}
MODEL_CONFIGS = {
'base':{},
'small_lstm':small_lstm,
'tiny_lstm': tiny_lstm,
'big_lstm':big_lstm,
'lstm_simple': lstm_simple,
'lstm_drop1': lstm_drop1,
'lstm_drop2': lstm_drop2,
'lstm_drop12': lstm_drop12,
'lstm_drop3': lstm_drop3,
'big_lstm_v2': big_lstm_v2,
'bigger_lstm_v2': bigger_lstm_v2,
'bigger_lstm_v3': bigger_lstm_v3,
'bigger_lstm_v4': bigger_lstm_v4,
'big_lstm_drop5': big_lstm_drop5,
'lstm_drop30': lstm_drop30,
'small_gpt': small_gpt,
'big_gpt_v2': big_gpt_v2,
'very_big_gpt': very_big_gpt,
'lstm_drop30_v2': lstm_drop30_v2,
'lstm3_drop20': lstm3_drop20,
'very_big_gpt_v2': very_big_gpt_v2,
'small_gpt_v2': small_gpt_v2,
'very_big_gpt_v3': very_big_gpt_v3,
'lstm3_big_drop2': lstm3_big_drop2,
'very_big_gpt_v4': very_big_gpt_v4,
'very_big_gpt_v5': very_big_gpt_v5,
'very_big_gpt_v6': very_big_gpt_v6,
'lstm2_big_drop20': lstm2_big_drop20,
'very_big_gpt_v7': very_big_gpt_v7,
'lstm2_big_drop20_v2': lstm2_big_drop20_v2,
'very_big_gpt_v8': very_big_gpt_v8,
'lstm2_big_drop30_v2': lstm2_big_drop30_v2,
'lstm2_drop20': lstm2_drop20,
'tiny_lstm_v2': tiny_lstm_v2,
'lstm3_drop30': lstm3_drop30,
'small_lstm_v2': small_lstm_v2,
'lstm_drop30_v3': lstm_drop30_v3,
'lstm_drop30_v4': lstm_drop30_v4,
'big_gpt_v3': big_gpt_v3,
'small_gpt_v3': small_gpt_v3,
'big_gpt_v4': big_gpt_v4,
'small_gpt_v4': small_gpt_v4,
'small_gpt_v5': small_gpt_v5,
'very_big_gpt_v9': very_big_gpt_v9,
'small_gpt_v6': small_gpt_v6,
'small_lstm_v3': small_lstm_v3,
'lstm3_drop60': lstm3_drop60,
'small_gpt_v7': small_gpt_v7,
'small_gpt_v8': small_gpt_v8,
'small_gpt_v9': small_gpt_v9,
'small_ugpt_v9': small_ugpt_v9,
'small_lstm_v4': small_lstm_v4,
'big_gpt_v9': big_gpt_v9,
'very_big_gpt_v10': very_big_gpt_v10,
'lstm3_drop50': lstm3_drop50,
'lstm3_drop41': lstm3_drop41,
'lstm_drop31_v2': lstm_drop31_v2,
'big_gpt_v5': big_gpt_v5,
'lstm_drop31_v3': lstm_drop31_v3,
'big_gpt_v6': big_gpt_v6,
'lstm_drop31_v4': lstm_drop31_v4,
'lstm_drop31_v5': lstm_drop31_v5,
'biglstm_drop31_v2': biglstm_drop31_v2,
'short_gpt_v9': short_gpt_v9,
'ff_mnist': ff_mnist,
'vcnn_mnist1': vcnn_mnist1,
'vcnn_mnist2': vcnn_mnist2,
'vcnn_mnist3': vcnn_mnist3,
'vcnn_mnist5': vcnn_mnist5,
'vcnn_mnist6': vcnn_mnist6,
'vcnn_mnist7': vcnn_mnist7,
'vcnn_mnist8': vcnn_mnist8,
'vcnn_mnist4': vcnn_mnist4,
'caps_base': caps_base,
'biglstm_drop31_v3': biglstm_drop31_v3,
'mat_caps_base': mat_caps_base,
'small_lstm_v6': small_lstm_v6,
'vcnn_svhn1': vcnn_svhn1,
'vcnn_svhn2': vcnn_svhn2,
'vcnn_svhn3': vcnn_svhn3,
'vcnn_svhn4': vcnn_svhn4,
'vcnn_svhn5': vcnn_svhn5,
'rsnt_svhn1': rsnt_svhn1,
'rsnt_svhn2': rsnt_svhn2,
'rsnt_svhn3': rsnt_svhn3,
'rsnt_svhn4': rsnt_svhn4,
'rsnt_svhn5': rsnt_svhn5,
'ff_svhn': ff_svhn,
'ff_svhn2': ff_svhn2,
'ff_svhn3': ff_svhn3,
'ff_svhn4': ff_svhn4,
'rsnt_mnist1': rsnt_mnist1,
'rsnt_mnist2': rsnt_mnist2,
'rsnt_mnist3': rsnt_mnist3,
'ff_mnist1': ff_mnist1,
'ff_mnist2': ff_mnist2,
'ff_mnist3': ff_mnist3,
'ff_mnist4': ff_mnist4,
'ff_mnist5': ff_mnist5
}
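# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the repo): one way the named presets above could be
# resolved into config objects. The helper name `get_model_config` and the rule that GPT
# presets feed GPT2Config while the rest feed ModelConfig are assumptions for demonstration.
# ---------------------------------------------------------------------------
def get_model_config(name, **task_kwargs):
    params = dict(MODEL_CONFIGS[name])
    params.update(task_kwargs)          # task-specific values, e.g. input_dim / output_dim / vocab_size
    if 'gpt' in name:
        return GPT2Config(**params)     # GPT presets carry attn_pdrop / embd_pdrop / resid_pdrop
    return ModelConfig(**params)        # LSTM / FF / CNN presets map onto ModelConfig kwargs

# Example:
#   lstm_cfg = get_model_config('small_lstm', input_dim=10000, output_dim=10000)
#   gpt_cfg = get_model_config('small_gpt', vocab_size=10000)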
# Reflect-master/util/distill_params.py
pure_dstl_1 = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0005,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0005,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'n_epochs': 100
}
pure_dstl_2 = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0005,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0005,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'n_epochs': 100
}
pure_dstl_3 = {
'distill_temp' : 5.0,
'student_distill_rate' : 0.9,
'student_gold_rate' : 0.1,
'student_learning_rate' : 0.0005,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0005,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'n_epochs': 100,
}
pure_dstl_4 = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.00005,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'n_epochs': 100
}
pure_dstl_6 = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.00005,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 10000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.00005,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'n_epochs': 100
}
pure_dstl_4_radamfst = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'radam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'adam'
}
pure_dstl_4_adamfst = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'adam'
}
pure_dstl_4_crs_fst = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
pure_dstl_4_crs_fst2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
pure_dstl_4_crs_fst3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
schdl1_dstl_4_crs_fst3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs',
'distill_schedule': 'exp'
}
schdl2_dstl_4_crs_fst3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs',
'distill_schedule': 'crs'
}
pure_dstl_4_crs_fst4 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
schdl1_dstl_4_crs_fst4 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs',
'distill_schedule': 'exp'
}
schdl2_dstl_4_crs_fst4 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs',
'distill_schedule': 'crs'
}
pure_dstl_4_crs_fst5 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
pure_dstl5_4_crs_slw_mnst = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 100000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw',
'n_epochs': 300,
}
pure_dstl5_4_crs_slw_2 = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 100000,
'student_hold_base_rate_steps' : 0,
'student_decay_rate': 0.2,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw',
'n_epochs': 400,
}
pure_dstl5_4_crs_slw_3 = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 200000,
'student_hold_base_rate_steps' : 0,
'student_decay_rate': 0.2,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw',
'n_epochs': 400,
}
pure_dstl2_4_crs_slw_3 = {
'distill_temp' : 2.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 200000,
'student_hold_base_rate_steps' : 0,
'student_decay_rate': 0.2,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw',
'n_epochs': 400,
}
pure_dstl1_4_crs_slw_3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 200000,
'student_hold_base_rate_steps' : 0,
'student_decay_rate': 0.2,
'student_warmup_steps' : 1000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw',
'n_epochs': 400,
}
pure_dstl5_4_crs_fst_2 = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0005,
'student_decay_steps' : 50000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_decay_rate': 0.6,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0005,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs',
'n_epochs': 300,
}
pure_dstl5_4_crs_fst_3_mnst = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0005,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 1000,
'student_decay_rate': 0.6,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0005,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs',
'n_epochs': 300,
}
pure_dstl5_4_crs_fst_3_mnst = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0005,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 1000,
'student_decay_rate': 0.6,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0005,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs',
}
pure_dstl5_4_crs_fst = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 1000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs',
'n_epochs': 300,
}
pure_dstl_4_crs_slw = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
pure_dstl_4_crs_slw_hld = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
pure_dstl_4_crs_slw_hld1 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'adam',
'schedule': 'crs'
}
pure_dstl_4_crs_slw_hld2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
pure_dstl_4_crs_slw_hld3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'adam',
'schedule': 'crs'
}
pure_dstl_4_crs_slw_vp1 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'crs',
'n_epochs': 30
}
pure_dstl_4_crs_slw_vp2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'crs',
'n_epochs': 30
}
pure_dstl_4_crs_slw_vp3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 1000,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'crs',
'n_epochs': 30
}
pure_dstl_4_crs_slw_vp4 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 1000,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'crs',
'n_epochs': 30
}
pure_dstl_4_crs_slw_vp5 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.8,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'crs',
'n_epochs': 30
}
pure_dstl_4_crs_slw_vp6 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.8,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'crs',
'n_epochs': 30
}
pure_dstl_4_exp_slw_vp6 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.8,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'exp',
'n_epochs': 40
}
pure_dstl_4_crs_slw_vp7 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.8,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_decay_rate': 0.5,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'crs',
'n_epochs': 30
}
pure_dstl_4_crs_slw_vp8 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.8,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_decay_rate': 0.5,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'crs',
'n_epochs': 30
}
pure_dstl_4_exp_vp8 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.8,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_decay_rate': 0.5,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'exp',
'n_epochs': 40
}
pure_dstl_4_exp_vp9 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.96,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_decay_rate': 0.5,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'exp',
'n_epochs': 40
}
pure_dstl_4_exp_vp3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.7,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_decay_rate': 0.9,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'exp',
'n_epochs': 40
}
pure_dstl_4_exp_vp5 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.7,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_decay_rate': 0.96,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'exp',
'n_epochs': 40
}
pure_dstl_4_exp_vp4 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.8,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_decay_rate': 0.8,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'adam',
'schedule': 'exp',
'n_epochs': 40
}
pure_dstl_4_crs_slw_hld31 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 5000,
'teacher_hold_base_rate_steps' : 1000,
'teacher_decay_rate': 0.8,
'teacher_optimizer' : 'adam',
'schedule': 'crs'
}
pure_dstl_4_crs_slw_hld4 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
pure_dstl5_4_crs_slw = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
pure_dstl_4_crs_slwfst = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'adam',
'schedule': 'crs_fst'
}
dstl_6_crs_slw = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.9,
'student_gold_rate' : 0.1,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
schdld_dstl_6_crs_slw = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.9,
'student_gold_rate' : 0.1,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs',
'dstl_decay_steps': 10000,
'dstl_warmup_steps': 0,
'hold_base_dstlrate_steps': 10000,
}
pure_dstl_4_crs_vslw = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 100000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs'
}
pure_dstl_5 = {
'distill_temp' : 2.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam'
}
pure_dstl_4_fstonln = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam'
}
pure_dstl_mn_fstonln = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'student_gold_rate' : 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
}
pure_rpdst_crs_slwfst = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.0,
'student_distill_rep_rate': 1.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'adam',
'schedule': 'crs_fst'
}
dstl_910_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.9,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
dstl5_910_crs_slwfst_2 = {
'distill_temp' : 5.0,
'student_distill_rate' : 0.9,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
dstl5_910_crs_slwfst_3 = {
'distill_temp' : 5.0,
'student_distill_rate' : 0.9,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 5000,
'teacher_hold_base_rate_steps' : 1000000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
schdexp_dstl_10_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'distill_min_rate': 0.0,
'distill_schedule': 'exp',
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
schdexp_dstl_10_crs_slwfst_3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'distill_min_rate': 0.0,
'distill_schedule': 'exp',
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
schdcrs_dstl_10_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'distill_min_rate': 0.0,
'distill_schedule': 'crs',
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
schdcrs_dstl_10_crs_slwfst_3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 1.0,
'distill_min_rate': 0.0,
'distill_schedule': 'crs',
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
schdexp_dstl5_910_crs_slwfst_2 = {
'distill_temp' : 5.0,
'student_distill_rate' : 1.0,
'distill_min_rate': 0.0,
'distill_schedule': 'exp',
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst5_019_crs_slwfst_2 = {
'distill_temp' : 5.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 10000,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst5_019_crs_slwfst_3 = {
'distill_temp' : 5.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 1000000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_0010_crs_slwfst_3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.0,
'student_distill_rep_rate': 1.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'adam',
'schedule': 'crs_fst'
}
rpdst_019_crs_slwslw_2_trns = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0005,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw'
}
rpdst_019_crs_slwslw_3_trns = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 1000,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw'
}
rpdst_019_crs_slwfst_5 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_00199_crs_slw_550 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.01,
'student_distill_rep_rate': 0.99,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 100000,
'student_decay_rate': 0.1,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw'
}
rpdst_00199_crs_fst_550 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.01,
'student_distill_rep_rate': 0.99,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 20000,
'student_decay_rate': 0.1,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst_51 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 10000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'exp'
}
rpdst_019_crs_slwfst_52 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 100000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 1000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw'
}
rpdst_019_crs_slwfst_53 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 1000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst_81 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 10000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 1000,
'teacher_optimizer' : 'radam',
'schedule': 'exp'
}
rpdst_019_crs_slwfst_56 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 20000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_decay_rate': 0.9,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_decay_rate': 0.96,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst_25 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'radam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'exp_fst'
}
rpdst_00199_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.01,
'student_distill_rep_rate': 0.99,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_off_00199_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.01,
'student_distill_rep_rate': 0.99,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.00,
'schedule': 'crs_fst'
}
rpdst_off_019_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0,
'schedule': 'crs_fst'
}
rpdst_010_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 1.0,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_off_010_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 1.0,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.00,
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst_3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_00199_crs_slwfst_3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.01,
'student_distill_rep_rate': 0.99,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_off_010_crs_slwfst_3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 1.0,
'student_distill_rep_rate': 0.0,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.000,
'schedule': 'crs_fst'
}
rpdst_off_019_crs_slwfst_3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.000,
'schedule': 'crs_fst'
}
rpdst_off_00199_crs_slwfst_3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.01,
'student_distill_rep_rate': 0.99,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.00,
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst_4 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 100000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst_6 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 1000,
'student_decay_rate': 0.5,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_decay_rate': 0.96,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst_7 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_decay_rate': 0.6,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_decay_rate': 0.96,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_0010_crs_slwfst_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.0,
'student_distill_rep_rate': 1.0,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst'
}
rpdst_019_crs_slwfst_sst1 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 1000,
'teacher_warmup_steps' : 000,
'teacher_hold_base_rate_steps' : 5000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_019_crs_slwfst_sst2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 1000,
'teacher_warmup_steps' : 000,
'teacher_hold_base_rate_steps' : 5000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_019_crs_slwfst_sst3 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 000,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_019_crs_slwfst_sst4 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 000,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_019_crs_slwfst_sst5 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0005,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 000,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_019_crs_slwfst_sst6 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 500,
'teacher_warmup_steps' : 000,
'teacher_hold_base_rate_steps' : 5000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_019_crs_slwfst_sst7 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 20000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 500,
'teacher_warmup_steps' : 000,
'teacher_hold_base_rate_steps' : 6000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_00199_crs_slwfst_sst6 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.01,
'student_distill_rep_rate': 0.99,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 500,
'teacher_warmup_steps' : 000,
'teacher_hold_base_rate_steps' : 5000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_00199_crs_slwfst_sst6_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.01,
'student_distill_rep_rate': 0.99,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_019_crs_slwfst_sst6_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_00199_crs_slwfst_sst8_2 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.01,
'student_distill_rep_rate': 0.99,
'student_learning_rate' : 0.00005,
'student_decay_steps' : 10000,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 0,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.001,
'teacher_decay_steps' : 10000,
'teacher_warmup_steps' : 0.0,
'teacher_hold_base_rate_steps' : 0,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
rpdst_019_exp_sst10 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 500,
'student_hold_base_rate_steps' : 5000,
'student_warmup_steps' : 0,
'student_optimizer' : 'radam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 500,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 5000,
'teacher_optimizer' : 'radam',
'schedule': 'exp',
}
rpdst_019_exp_sst11 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.0001,
'student_decay_steps' : 1000,
'student_decay_rate': 0.9,
'student_hold_base_rate_steps' : 5000,
'student_warmup_steps' : 0,
'student_optimizer' : 'radam',
'teacher_learning_rate' : 0.0001,
'teacher_decay_steps' : 1000,
'teacher_decay_rate': 0.9,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 5000,
'teacher_optimizer' : 'radam',
'schedule': 'exp',
}
rpdst_019_exp_sst12 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.00005,
'student_decay_steps' : 10000,
'student_decay_rate': 0.5,
'student_hold_base_rate_steps' : 10000,
'student_warmup_steps' : 0,
'student_optimizer' : 'radam',
'teacher_learning_rate' : 0.00005,
'teacher_decay_steps' : 10000,
'teacher_decay_rate': 0.5,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'radam',
'schedule': 'exp',
}
rpdst_019_exp_sst13 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.00005,
'student_decay_steps' : 20000,
'student_decay_rate': 0.3,
'student_hold_base_rate_steps' : 15000,
'student_warmup_steps' : 1000,
'student_optimizer' : 'radam',
'teacher_learning_rate' : 0.00005,
'teacher_decay_steps' : 20000,
'teacher_decay_rate': 0.3,
'teacher_warmup_steps' : 1000,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'adam',
'schedule': 'exp',
}
rpdst_019_exp_sst14 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.00005,
'student_decay_steps' : 10000,
'student_decay_rate': 0.5,
'student_hold_base_rate_steps' : 10000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.00005,
'teacher_decay_steps' : 10000,
'teacher_decay_rate': 0.5,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'radam',
'schedule': 'exp',
}
rpdst_019_exp_sst15 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.00005,
'student_decay_steps' : 10000,
'student_decay_rate': 0.3,
'student_hold_base_rate_steps' : 10000,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.00005,
'teacher_decay_steps' : 10000,
'teacher_decay_rate': 0.5,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_slw',
}
rpdst_019_exp_sst16 = {
'distill_temp' : 1.0,
'student_distill_rate' : 0.0,
'student_gold_rate' : 0.1,
'student_distill_rep_rate': 0.9,
'student_learning_rate' : 0.00005,
'student_decay_steps' : 1000,
'student_decay_rate': 0.3,
'student_hold_base_rate_steps' : 0,
'student_warmup_steps' : 10000,
'student_optimizer' : 'adam',
'teacher_learning_rate' : 0.00005,
'teacher_decay_steps' : 10000,
'teacher_decay_rate': 0.5,
'teacher_warmup_steps' : 0,
'teacher_hold_base_rate_steps' : 10000,
'teacher_optimizer' : 'radam',
'schedule': 'crs_fst',
}
DISTILL_PARAMS = {'pure_dstl_1' : pure_dstl_1,
'pure_dstl_2' : pure_dstl_2,
'pure_dstl_3' : pure_dstl_3,
'pure_dstl_4': pure_dstl_4,
'pure_dstl_6': pure_dstl_6,
'pure_dstl_4_radamfst': pure_dstl_4_radamfst,
'pure_dstl_5': pure_dstl_5,
'pure_dstl_4_fstonln': pure_dstl_4_fstonln,
'pure_dstl_4_crs_fst': pure_dstl_4_crs_fst,
'pure_dstl_4_crs_slw': pure_dstl_4_crs_slw,
'pure_dstl_4_crs_vslw': pure_dstl_4_crs_vslw,
'dstl_6_crs_slw': dstl_6_crs_slw,
'pure_dstl_4_crs_slwfst': pure_dstl_4_crs_slwfst,
'pure_dstl_mn_fstonln': pure_dstl_mn_fstonln,
'pure_rpdst_crs_slwfst': pure_rpdst_crs_slwfst,
'dstl_910_crs_slwfst_2': dstl_910_crs_slwfst_2,
'dstl5_910_crs_slwfst_2': dstl5_910_crs_slwfst_2,
'dstl5_910_crs_slwfst_3': dstl5_910_crs_slwfst_3,
'rpdst_019_crs_slwfst': rpdst_019_crs_slwfst,
'rpdst5_019_crs_slwfst_2': rpdst5_019_crs_slwfst_2,
'rpdst_019_crs_slwfst_2': rpdst_019_crs_slwfst_2,
'schdexp_dstl5_910_crs_slwfst_2': schdexp_dstl5_910_crs_slwfst_2,
'schdexp_dstl_10_crs_slwfst_2': schdexp_dstl_10_crs_slwfst_2,
'schdexp_dstl_10_crs_slwfst_3': schdexp_dstl_10_crs_slwfst_3,
'schdcrs_dstl_10_crs_slwfst_2': schdcrs_dstl_10_crs_slwfst_2,
'schdcrs_dstl_10_crs_slwfst_3': schdcrs_dstl_10_crs_slwfst_3,
'rpdst5_019_crs_slwfst_3': rpdst5_019_crs_slwfst_3,
'rpdst_019_crs_slwfst_3': rpdst_019_crs_slwfst_3,
'rpdst_0010_crs_slwfst_2': rpdst_0010_crs_slwfst_2,
'rpdst_0010_crs_slwfst_3': rpdst_0010_crs_slwfst_3,
'rpdst_019_crs_slwfst_sst1': rpdst_019_crs_slwfst_sst1,
'rpdst_019_crs_slwfst_sst2': rpdst_019_crs_slwfst_sst2,
'rpdst_019_crs_slwfst_sst3': rpdst_019_crs_slwfst_sst3,
'rpdst_019_crs_slwfst_sst4': rpdst_019_crs_slwfst_sst4,
'rpdst_019_crs_slwfst_sst5': rpdst_019_crs_slwfst_sst5,
'rpdst_019_crs_slwslw_2_trns': rpdst_019_crs_slwslw_2_trns,
'rpdst_019_crs_slwslw_3_trns': rpdst_019_crs_slwslw_3_trns,
'rpdst_019_crs_slwfst_4': rpdst_019_crs_slwfst_4,
'rpdst_019_crs_slwfst_sst6': rpdst_019_crs_slwfst_sst6,
'rpdst_00199_crs_slwfst_sst6': rpdst_00199_crs_slwfst_sst6,
'rpdst_010_crs_slwfst_2': rpdst_010_crs_slwfst_2,
'rpdst_off_019_crs_slwfst_3': rpdst_off_019_crs_slwfst_3,
'rpdst_off_019_crs_slwfst_2': rpdst_off_019_crs_slwfst_2,
'rpdst_019_crs_slwfst_sst7': rpdst_019_crs_slwfst_sst7,
'rpdst_00199_crs_slwfst_2': rpdst_00199_crs_slwfst_2,
'rpdst_off_00199_crs_slwfst_2': rpdst_off_00199_crs_slwfst_2,
'rpdst_off_00199_crs_slwfst_3': rpdst_off_00199_crs_slwfst_3,
'rpdst_00199_crs_slwfst_3': rpdst_00199_crs_slwfst_3,
'rpdst_off_010_crs_slwfst_2': rpdst_off_010_crs_slwfst_2,
'rpdst_off_010_crs_slwfst_3': rpdst_off_010_crs_slwfst_3,
'rpdst_00199_crs_slwfst_sst6_2': rpdst_00199_crs_slwfst_sst6_2,
'rpdst_00199_crs_slwfst_sst8_2': rpdst_00199_crs_slwfst_sst8_2,
'rpdst_019_crs_slwfst_sst6_2': rpdst_019_crs_slwfst_sst6_2,
'rpdst_019_exp_sst10': rpdst_019_exp_sst10,
'rpdst_019_exp_sst11': rpdst_019_exp_sst11,
'rpdst_019_exp_sst12': rpdst_019_exp_sst12,
'rpdst_019_crs_slwfst_5': rpdst_019_crs_slwfst_5,
'rpdst_019_crs_slwfst_56': rpdst_019_crs_slwfst_56,
'rpdst_019_exp_sst13': rpdst_019_exp_sst13,
'rpdst_019_crs_slwfst_6': rpdst_019_crs_slwfst_6,
'rpdst_019_crs_slwfst_7': rpdst_019_crs_slwfst_7,
'rpdst_019_exp_sst14': rpdst_019_exp_sst14,
'rpdst_019_exp_sst15': rpdst_019_exp_sst15,
'rpdst_00199_crs_fst_550': rpdst_00199_crs_fst_550,
'rpdst_00199_crs_slw_550': rpdst_00199_crs_slw_550,
'rpdst_019_crs_slwfst_25': rpdst_019_crs_slwfst_25,
'rpdst_019_exp_sst16': rpdst_019_exp_sst16,
'pure_dstl5_4_crs_fst': pure_dstl5_4_crs_fst,
'rpdst_019_crs_slwfst_81': rpdst_019_crs_slwfst_81,
'rpdst_019_crs_slwfst_51': rpdst_019_crs_slwfst_51,
'rpdst_019_crs_slwfst_52': rpdst_019_crs_slwfst_52,
'rpdst_019_crs_slwfst_53': rpdst_019_crs_slwfst_53,
'pure_dstl5_4_crs_fst_2': pure_dstl5_4_crs_fst_2,
'pure_dstl5_4_crs_fst_3_mnst': pure_dstl5_4_crs_fst_3_mnst,
'pure_dstl5_4_crs_slw_mnst': pure_dstl5_4_crs_slw_mnst,
'pure_dstl5_4_crs_slw': pure_dstl5_4_crs_slw,
'pure_dstl5_4_crs_slw_2': pure_dstl5_4_crs_slw_2,
'pure_dstl5_4_crs_slw_3': pure_dstl5_4_crs_slw_3,
'pure_dstl1_4_crs_slw_3': pure_dstl1_4_crs_slw_3,
'pure_dstl2_4_crs_slw_3': pure_dstl2_4_crs_slw_3,
'pure_dstl_4_crs_fst2': pure_dstl_4_crs_fst2,
'pure_dstl_4_crs_fst3': pure_dstl_4_crs_fst3,
'pure_dstl_4_crs_fst4': pure_dstl_4_crs_fst4,
'pure_dstl_4_crs_fst5': pure_dstl_4_crs_fst5,
'pure_dstl_4_adamfst': pure_dstl_4_adamfst,
'schdl1_dstl_4_crs_fst3': schdl1_dstl_4_crs_fst3,
'schdl2_dstl_4_crs_fst3': schdl2_dstl_4_crs_fst3,
'schdl1_dstl_4_crs_fst4': schdl1_dstl_4_crs_fst4,
'schdl2_dstl_4_crs_fst4': schdl2_dstl_4_crs_fst4,
'pure_dstl_4_crs_slw_hld': pure_dstl_4_crs_slw_hld,
                  'pure_dstl_4_crs_slw_hld1': pure_dstl_4_crs_slw_hld1,
'pure_dstl_4_crs_slw_hld2': pure_dstl_4_crs_slw_hld2,
'pure_dstl_4_crs_slw_hld3': pure_dstl_4_crs_slw_hld3,
'pure_dstl_4_crs_slw_hld4': pure_dstl_4_crs_slw_hld4,
'pure_dstl_4_crs_slw_hld31': pure_dstl_4_crs_slw_hld31,
'pure_dstl_4_crs_slw_vp1': pure_dstl_4_crs_slw_vp1,
'pure_dstl_4_crs_slw_vp2': pure_dstl_4_crs_slw_vp2,
'pure_dstl_4_crs_slw_vp3': pure_dstl_4_crs_slw_vp3,
'pure_dstl_4_crs_slw_vp4': pure_dstl_4_crs_slw_vp4,
'pure_dstl_4_crs_slw_vp5': pure_dstl_4_crs_slw_vp5,
'pure_dstl_4_crs_slw_vp6': pure_dstl_4_crs_slw_vp6,
'pure_dstl_4_crs_slw_vp7': pure_dstl_4_crs_slw_vp7,
'pure_dstl_4_crs_slw_vp8': pure_dstl_4_crs_slw_vp8,
'pure_dstl_4_exp_slw_vp6': pure_dstl_4_exp_slw_vp6,
'pure_dstl_4_exp_vp9': pure_dstl_4_exp_vp9,
'pure_dstl_4_exp_vp3': pure_dstl_4_exp_vp3,
'pure_dstl_4_exp_vp4': pure_dstl_4_exp_vp4,
'pure_dstl_4_exp_vp8': pure_dstl_4_exp_vp8,
'pure_dstl_4_exp_vp5': pure_dstl_4_exp_vp5
}
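# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Configurations are registered by name in DISTILL_PARAMS, so a training script
# would typically select one via its key. The helper below is a hypothetical
# example of such a lookup; its name and error message are assumptions.
def get_distill_params(name):
    """Return the hyperparameter dict registered under `name` (hypothetical helper)."""
    try:
        return DISTILL_PARAMS[name]
    except KeyError:
        raise ValueError("unknown distill config %r; valid keys include: %s"
                         % (name, ", ".join(sorted(DISTILL_PARAMS))))
# Example (hypothetical): get_distill_params('rpdst_019_crs_slwfst_2')['teacher_optimizer'] == 'radam'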
| 60,330 | 27.364363 | 83 | py |
Reflect | Reflect-master/util/inflect.py | '''
inflect.py: correctly generate plurals, ordinals, indefinite articles;
convert numbers to words
Copyright (C) 2010 Paul Dyson
Based upon the Perl module Lingua::EN::Inflect by Damian Conway.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The original Perl module Lingua::EN::Inflect by Damian Conway is
available from http://search.cpan.org/~dconway/
This module can be downloaded at http://pypi.python.org/pypi/inflect
methods:
classical inflect
plural plural_noun plural_verb plural_adj singular_noun no num a an
compare compare_nouns compare_verbs compare_adjs
present_participle
ordinal
number_to_words
join
defnoun defverb defadj defa defan
INFLECTIONS: classical inflect
plural plural_noun plural_verb plural_adj singular_noun compare
no num a an present_participle
PLURALS: classical inflect
plural plural_noun plural_verb plural_adj singular_noun no num
compare compare_nouns compare_verbs compare_adjs
COMPARISONS: classical
compare compare_nouns compare_verbs compare_adjs
ARTICLES: classical inflect num a an
NUMERICAL: ordinal number_to_words
USER_DEFINED: defnoun defverb defadj defa defan
Exceptions:
UnknownClassicalModeError
BadNumValueError
BadChunkingOptionError
NumOutOfRangeError
BadUserDefinedPatternError
BadRcFileError
BadGenderError
'''
from re import match, search, subn, IGNORECASE, VERBOSE
from re import split as splitre
from re import error as reerror
from re import sub as resub
class UnknownClassicalModeError(Exception):
pass
class BadNumValueError(Exception):
pass
class BadChunkingOptionError(Exception):
pass
class NumOutOfRangeError(Exception):
pass
class BadUserDefinedPatternError(Exception):
pass
class BadRcFileError(Exception):
pass
class BadGenderError(Exception):
pass
__ver_major__ = 0
__ver_minor__ = 2
__ver_patch__ = 5
__ver_sub__ = ""
__version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__,
__ver_patch__, __ver_sub__)
STDOUT_ON = False
def print3(txt):
if STDOUT_ON:
print(txt)
def enclose(s):
return "(?:%s)" % s
def joinstem(cutpoint=0, words=''):
'''
join stem of each word in words into a string for regex
each word is truncated at cutpoint
cutpoint is usually negative indicating the number of letters to remove
from the end of each word
e.g.
joinstem(-2, ["ephemeris", "iris", ".*itis"]) returns
(?:ephemer|ir|.*it)
'''
return enclose('|'.join(w[:cutpoint] for w in words))
def bysize(words):
'''
take a list of words and return a dict of sets sorted by word length
e.g.
ret[3]=set(['ant', 'cat', 'dog', 'pig'])
ret[4]=set(['frog', 'goat'])
ret[5]=set(['horse'])
ret[8]=set(['elephant'])
'''
ret = {}
for w in words:
if len(w) not in ret:
ret[len(w)] = set()
ret[len(w)].add(w)
return ret
def make_pl_si_lists(lst, plending, siendingsize, dojoinstem=True):
'''
given a list of singular words: lst
an ending to append to make the plural: plending
the number of characters to remove from the singular before appending plending: siendingsize
a flag whether to create a joinstem: dojoinstem
return:
a list of pluralised words: si_list (called si because this is what you need to
look for to make the singular)
the pluralised words as a dict of sets sorted by word length: si_bysize
the singular words as a dict of sets sorted by word length: pl_bysize
if dojoinstem is True: a regular expression that matches any of the stems: stem
'''
if siendingsize is not None:
siendingsize = -siendingsize
si_list = [w[:siendingsize] + plending for w in lst]
pl_bysize = bysize(lst)
si_bysize = bysize(si_list)
if dojoinstem:
stem = joinstem(siendingsize, lst)
return si_list, si_bysize, pl_bysize, stem
else:
return si_list, si_bysize, pl_bysize
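# Worked example (added for clarity; not part of the upstream library):
# make_pl_si_lists(['bacterium', 'datum'], 'a', 2) should return
#   (['bacteria', 'data'],
#    {8: {'bacteria'}, 4: {'data'}},
#    {9: {'bacterium'}, 5: {'datum'}},
#    '(?:bacteri|dat)')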
# 1. PLURALS
pl_sb_irregular_s = {
"corpus": "corpuses|corpora",
"opus": "opuses|opera",
"genus": "genera",
"mythos": "mythoi",
"penis": "penises|penes",
"testis": "testes",
"atlas": "atlases|atlantes",
"yes": "yeses",
}
pl_sb_irregular = {
"child": "children",
"brother": "brothers|brethren",
"loaf": "loaves",
"hoof": "hoofs|hooves",
"beef": "beefs|beeves",
"thief": "thiefs|thieves",
"money": "monies",
"mongoose": "mongooses",
"ox": "oxen",
"cow": "cows|kine",
"graffito": "graffiti",
"octopus": "octopuses|octopodes",
"genie": "genies|genii",
"ganglion": "ganglions|ganglia",
"trilby": "trilbys",
"turf": "turfs|turves",
"numen": "numina",
"atman": "atmas",
"occiput": "occiputs|occipita",
"sabretooth": "sabretooths",
"sabertooth": "sabertooths",
"lowlife": "lowlifes",
"flatfoot": "flatfoots",
"tenderfoot": "tenderfoots",
"romany": "romanies",
"jerry": "jerries",
"mary": "maries",
"talouse": "talouses",
"blouse": "blouses",
"rom": "roma",
"carmen": "carmina",
}
pl_sb_irregular.update(pl_sb_irregular_s)
# pl_sb_irregular_keys = enclose('|'.join(pl_sb_irregular.keys()))
pl_sb_irregular_caps = {
'Romany': 'Romanies',
'Jerry': 'Jerrys',
'Mary': 'Marys',
'Rom': 'Roma',
}
pl_sb_irregular_compound = {
"prima donna": "prima donnas|prime donne",
}
si_sb_irregular = dict([(v, k) for (k, v) in pl_sb_irregular.items()])
keys = list(si_sb_irregular.keys())
for k in keys:
if '|' in k:
k1, k2 = k.split('|')
si_sb_irregular[k1] = si_sb_irregular[k2] = si_sb_irregular[k]
del si_sb_irregular[k]
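# Added note (not part of the upstream library): plurals that offer alternatives,
# e.g. "hoof": "hoofs|hooves", are split by the loop above so that both 'hoofs'
# and 'hooves' map back to 'hoof' in si_sb_irregular.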
si_sb_irregular_caps = dict([(v, k) for (k, v) in pl_sb_irregular_caps.items()])
si_sb_irregular_compound = dict([(v, k) for (k, v) in pl_sb_irregular_compound.items()])
keys = list(si_sb_irregular_compound.keys())
for k in keys:
if '|' in k:
k1, k2 = k.split('|')
si_sb_irregular_compound[k1] = si_sb_irregular_compound[k2] = si_sb_irregular_compound[k]
del si_sb_irregular_compound[k]
# si_sb_irregular_keys = enclose('|'.join(si_sb_irregular.keys()))
# Z's that don't double
pl_sb_z_zes_list = (
"quartz", "topaz",
)
pl_sb_z_zes_bysize = bysize(pl_sb_z_zes_list)
pl_sb_ze_zes_list = ('snooze',)
pl_sb_ze_zes_bysize = bysize(pl_sb_ze_zes_list)
# CLASSICAL "..is" -> "..ides"
pl_sb_C_is_ides_complete = [
# GENERAL WORDS...
"ephemeris", "iris", "clitoris",
"chrysalis", "epididymis",
]
pl_sb_C_is_ides_endings = [
    # INFLAMMATIONS...
"itis",
]
pl_sb_C_is_ides = joinstem(-2, pl_sb_C_is_ides_complete + ['.*%s' % w for w in pl_sb_C_is_ides_endings])
pl_sb_C_is_ides_list = pl_sb_C_is_ides_complete + pl_sb_C_is_ides_endings
(si_sb_C_is_ides_list, si_sb_C_is_ides_bysize,
pl_sb_C_is_ides_bysize) = make_pl_si_lists(pl_sb_C_is_ides_list, 'ides', 2, dojoinstem=False)
# CLASSICAL "..a" -> "..ata"
pl_sb_C_a_ata_list = (
"anathema", "bema", "carcinoma", "charisma", "diploma",
"dogma", "drama", "edema", "enema", "enigma", "lemma",
"lymphoma", "magma", "melisma", "miasma", "oedema",
"sarcoma", "schema", "soma", "stigma", "stoma", "trauma",
"gumma", "pragma",
)
(si_sb_C_a_ata_list, si_sb_C_a_ata_bysize,
pl_sb_C_a_ata_bysize, pl_sb_C_a_ata) = make_pl_si_lists(pl_sb_C_a_ata_list, 'ata', 1)
# UNCONDITIONAL "..a" -> "..ae"
pl_sb_U_a_ae_list = (
"alumna", "alga", "vertebra", "persona"
)
(si_sb_U_a_ae_list, si_sb_U_a_ae_bysize,
pl_sb_U_a_ae_bysize, pl_sb_U_a_ae) = make_pl_si_lists(pl_sb_U_a_ae_list, 'e', None)
# CLASSICAL "..a" -> "..ae"
pl_sb_C_a_ae_list = (
"amoeba", "antenna", "formula", "hyperbola",
"medusa", "nebula", "parabola", "abscissa",
"hydra", "nova", "lacuna", "aurora", "umbra",
"flora", "fauna",
)
(si_sb_C_a_ae_list, si_sb_C_a_ae_bysize,
pl_sb_C_a_ae_bysize, pl_sb_C_a_ae) = make_pl_si_lists(pl_sb_C_a_ae_list, 'e', None)
# CLASSICAL "..en" -> "..ina"
pl_sb_C_en_ina_list = (
"stamen", "foramen", "lumen",
)
(si_sb_C_en_ina_list, si_sb_C_en_ina_bysize,
pl_sb_C_en_ina_bysize, pl_sb_C_en_ina) = make_pl_si_lists(pl_sb_C_en_ina_list, 'ina', 2)
# UNCONDITIONAL "..um" -> "..a"
pl_sb_U_um_a_list = (
"bacterium", "agendum", "desideratum", "erratum",
"stratum", "datum", "ovum", "extremum",
"candelabrum",
)
(si_sb_U_um_a_list, si_sb_U_um_a_bysize,
pl_sb_U_um_a_bysize, pl_sb_U_um_a) = make_pl_si_lists(pl_sb_U_um_a_list, 'a', 2)
# CLASSICAL "..um" -> "..a"
pl_sb_C_um_a_list = (
"maximum", "minimum", "momentum", "optimum",
"quantum", "cranium", "curriculum", "dictum",
"phylum", "aquarium", "compendium", "emporium",
"enconium", "gymnasium", "honorarium", "interregnum",
"lustrum", "memorandum", "millennium", "rostrum",
"spectrum", "speculum", "stadium", "trapezium",
"ultimatum", "medium", "vacuum", "velum",
"consortium", "arboretum",
)
(si_sb_C_um_a_list, si_sb_C_um_a_bysize,
pl_sb_C_um_a_bysize, pl_sb_C_um_a) = make_pl_si_lists(pl_sb_C_um_a_list, 'a', 2)
# UNCONDITIONAL "..us" -> "i"
pl_sb_U_us_i_list = (
"alumnus", "alveolus", "bacillus", "bronchus",
"locus", "nucleus", "stimulus", "meniscus",
"sarcophagus",
)
(si_sb_U_us_i_list, si_sb_U_us_i_bysize,
pl_sb_U_us_i_bysize, pl_sb_U_us_i) = make_pl_si_lists(pl_sb_U_us_i_list, 'i', 2)
# CLASSICAL "..us" -> "..i"
pl_sb_C_us_i_list = (
"focus", "radius", "genius",
"incubus", "succubus", "nimbus",
"fungus", "nucleolus", "stylus",
"torus", "umbilicus", "uterus",
"hippopotamus", "cactus",
)
(si_sb_C_us_i_list, si_sb_C_us_i_bysize,
pl_sb_C_us_i_bysize, pl_sb_C_us_i) = make_pl_si_lists(pl_sb_C_us_i_list, 'i', 2)
# CLASSICAL "..us" -> "..us" (ASSIMILATED 4TH DECLENSION LATIN NOUNS)
pl_sb_C_us_us = (
"status", "apparatus", "prospectus", "sinus",
"hiatus", "impetus", "plexus",
)
pl_sb_C_us_us_bysize = bysize(pl_sb_C_us_us)
# UNCONDITIONAL "..on" -> "a"
pl_sb_U_on_a_list = (
"criterion", "perihelion", "aphelion",
"phenomenon", "prolegomenon", "noumenon",
"organon", "asyndeton", "hyperbaton",
)
(si_sb_U_on_a_list, si_sb_U_on_a_bysize,
pl_sb_U_on_a_bysize, pl_sb_U_on_a) = make_pl_si_lists(pl_sb_U_on_a_list, 'a', 2)
# CLASSICAL "..on" -> "..a"
pl_sb_C_on_a_list = (
"oxymoron",
)
(si_sb_C_on_a_list, si_sb_C_on_a_bysize,
pl_sb_C_on_a_bysize, pl_sb_C_on_a) = make_pl_si_lists(pl_sb_C_on_a_list, 'a', 2)
# CLASSICAL "..o" -> "..i" (BUT NORMALLY -> "..os")
pl_sb_C_o_i = [
"solo", "soprano", "basso", "alto",
"contralto", "tempo", "piano", "virtuoso",
] # list not tuple so can concat for pl_sb_U_o_os
pl_sb_C_o_i_bysize = bysize(pl_sb_C_o_i)
si_sb_C_o_i_bysize = bysize(['%si' % w[:-1] for w in pl_sb_C_o_i])
pl_sb_C_o_i_stems = joinstem(-1, pl_sb_C_o_i)
# ALWAYS "..o" -> "..os"
pl_sb_U_o_os_complete = set((
"ado", "ISO", "NATO", "NCO", "NGO", "oto",
))
si_sb_U_o_os_complete = set('%ss' % w for w in pl_sb_U_o_os_complete)
pl_sb_U_o_os_endings = [
"aficionado", "aggro",
"albino", "allegro", "ammo",
"Antananarivo", "archipelago", "armadillo",
"auto", "avocado", "Bamako",
"Barquisimeto", "bimbo", "bingo",
"Biro", "bolero", "Bolzano",
"bongo", "Boto", "burro",
"Cairo", "canto", "cappuccino",
"casino", "cello", "Chicago",
"Chimango", "cilantro", "cochito",
"coco", "Colombo", "Colorado",
"commando", "concertino", "contango",
"credo", "crescendo", "cyano",
"demo", "ditto", "Draco",
"dynamo", "embryo", "Esperanto",
"espresso", "euro", "falsetto",
"Faro", "fiasco", "Filipino",
"flamenco", "furioso", "generalissimo",
"Gestapo", "ghetto", "gigolo",
"gizmo", "Greensboro", "gringo",
"Guaiabero", "guano", "gumbo",
"gyro", "hairdo", "hippo",
"Idaho", "impetigo", "inferno",
"info", "intermezzo", "intertrigo",
"Iquico", "jumbo",
"junto", "Kakapo", "kilo",
"Kinkimavo", "Kokako", "Kosovo",
"Lesotho", "libero", "libido",
"libretto", "lido", "Lilo",
"limbo", "limo", "lineno",
"lingo", "lino", "livedo",
"loco", "logo", "lumbago",
"macho", "macro", "mafioso",
"magneto", "magnifico", "Majuro",
"Malabo", "manifesto", "Maputo",
"Maracaibo", "medico", "memo",
"metro", "Mexico", "micro",
"Milano", "Monaco", "mono",
"Montenegro", "Morocco", "Muqdisho",
"myo",
"neutrino", "Ningbo",
"octavo", "oregano", "Orinoco",
"Orlando", "Oslo",
"panto", "Paramaribo", "Pardusco",
"pedalo", "photo", "pimento",
"pinto", "pleco", "Pluto",
"pogo", "polo", "poncho",
"Porto-Novo", "Porto", "pro",
"psycho", "pueblo", "quarto",
"Quito", "rhino", "risotto",
"rococo", "rondo", "Sacramento",
"saddo", "sago", "salvo",
"Santiago", "Sapporo", "Sarajevo",
"scherzando", "scherzo", "silo",
"sirocco", "sombrero", "staccato",
"sterno", "stucco", "stylo",
"sumo", "Taiko", "techno",
"terrazzo", "testudo", "timpano",
"tiro", "tobacco", "Togo",
"Tokyo", "torero", "Torino",
"Toronto", "torso", "tremolo",
"typo", "tyro", "ufo",
"UNESCO", "vaquero", "vermicello",
"verso", "vibrato", "violoncello",
"Virgo", "weirdo", "WHO",
"WTO", "Yamoussoukro", "yo-yo",
"zero", "Zibo",
] + pl_sb_C_o_i
pl_sb_U_o_os_bysize = bysize(pl_sb_U_o_os_endings)
si_sb_U_o_os_bysize = bysize(['%ss' % w for w in pl_sb_U_o_os_endings])
# UNCONDITIONAL "..ch" -> "..chs"
pl_sb_U_ch_chs_list = (
"czech", "eunuch", "stomach"
)
(si_sb_U_ch_chs_list, si_sb_U_ch_chs_bysize,
pl_sb_U_ch_chs_bysize, pl_sb_U_ch_chs) = make_pl_si_lists(pl_sb_U_ch_chs_list, 's', None)
# UNCONDITIONAL "..[ei]x" -> "..ices"
pl_sb_U_ex_ices_list = (
"codex", "murex", "silex",
)
(si_sb_U_ex_ices_list, si_sb_U_ex_ices_bysize,
pl_sb_U_ex_ices_bysize, pl_sb_U_ex_ices) = make_pl_si_lists(pl_sb_U_ex_ices_list, 'ices', 2)
pl_sb_U_ix_ices_list = (
"radix", "helix",
)
(si_sb_U_ix_ices_list, si_sb_U_ix_ices_bysize,
pl_sb_U_ix_ices_bysize, pl_sb_U_ix_ices) = make_pl_si_lists(pl_sb_U_ix_ices_list, 'ices', 2)
# CLASSICAL "..[ei]x" -> "..ices"
pl_sb_C_ex_ices_list = (
"vortex", "vertex", "cortex", "latex",
"pontifex", "apex", "index", "simplex",
)
(si_sb_C_ex_ices_list, si_sb_C_ex_ices_bysize,
pl_sb_C_ex_ices_bysize, pl_sb_C_ex_ices) = make_pl_si_lists(pl_sb_C_ex_ices_list, 'ices', 2)
pl_sb_C_ix_ices_list = (
"appendix",
)
(si_sb_C_ix_ices_list, si_sb_C_ix_ices_bysize,
pl_sb_C_ix_ices_bysize, pl_sb_C_ix_ices) = make_pl_si_lists(pl_sb_C_ix_ices_list, 'ices', 2)
# ARABIC: ".." -> "..i"
pl_sb_C_i_list = (
"afrit", "afreet", "efreet",
)
(si_sb_C_i_list, si_sb_C_i_bysize,
pl_sb_C_i_bysize, pl_sb_C_i) = make_pl_si_lists(pl_sb_C_i_list, 'i', None)
# HEBREW: ".." -> "..im"
pl_sb_C_im_list = (
"goy", "seraph", "cherub",
)
(si_sb_C_im_list, si_sb_C_im_bysize,
pl_sb_C_im_bysize, pl_sb_C_im) = make_pl_si_lists(pl_sb_C_im_list, 'im', None)
# UNCONDITIONAL "..man" -> "..mans"
pl_sb_U_man_mans_list = """
ataman caiman cayman ceriman
desman dolman farman harman hetman
human leman ottoman shaman talisman
""".split()
pl_sb_U_man_mans_caps_list = """
Alabaman Bahaman Burman German
Hiroshiman Liman Nakayaman Norman Oklahoman
Panaman Roman Selman Sonaman Tacoman Yakiman
Yokohaman Yuman
""".split()
(si_sb_U_man_mans_list, si_sb_U_man_mans_bysize,
pl_sb_U_man_mans_bysize) = make_pl_si_lists(pl_sb_U_man_mans_list, 's', None, dojoinstem=False)
(si_sb_U_man_mans_caps_list, si_sb_U_man_mans_caps_bysize,
pl_sb_U_man_mans_caps_bysize) = make_pl_si_lists(pl_sb_U_man_mans_caps_list, 's', None, dojoinstem=False)
pl_sb_uninflected_s_complete = [
# PAIRS OR GROUPS SUBSUMED TO A SINGULAR...
"breeches", "britches", "pajamas", "pyjamas", "clippers", "gallows",
"hijinks", "headquarters", "pliers", "scissors", "testes", "herpes",
"pincers", "shears", "proceedings", "trousers",
# UNASSIMILATED LATIN 4th DECLENSION
"cantus", "coitus", "nexus",
# RECENT IMPORTS...
"contretemps", "corps", "debris",
"siemens",
# DISEASES
"mumps",
# MISCELLANEOUS OTHERS...
"diabetes", "jackanapes", "series", "species", "subspecies", "rabies",
"chassis", "innings", "news", "mews", "haggis",
]
pl_sb_uninflected_s_endings = [
# RECENT IMPORTS...
"ois",
# DISEASES
"measles",
]
pl_sb_uninflected_s = pl_sb_uninflected_s_complete + ['.*%s' % w for w in pl_sb_uninflected_s_endings]
pl_sb_uninflected_herd = (
# DON'T INFLECT IN CLASSICAL MODE, OTHERWISE NORMAL INFLECTION
"wildebeest", "swine", "eland", "bison", "buffalo",
"elk", "rhinoceros", 'zucchini',
'caribou', 'dace', 'grouse', 'guinea fowl', 'guinea-fowl',
'haddock', 'hake', 'halibut', 'herring', 'mackerel',
'pickerel', 'pike', 'roe', 'seed', 'shad',
'snipe', 'teal', 'turbot', 'water fowl', 'water-fowl',
)
pl_sb_uninflected_complete = [
# SOME FISH AND HERD ANIMALS
"tuna", "salmon", "mackerel", "trout",
"bream", "sea-bass", "sea bass", "carp", "cod", "flounder", "whiting",
"moose",
# OTHER ODDITIES
"graffiti", "djinn", 'samuri',
'offspring', 'pence', 'quid', 'hertz',
] + pl_sb_uninflected_s_complete
# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE)
pl_sb_uninflected_caps = [
# ALL NATIONALS ENDING IN -ese
"Portuguese", "Amoyese", "Borghese", "Congoese", "Faroese",
"Foochowese", "Genevese", "Genoese", "Gilbertese", "Hottentotese",
"Kiplingese", "Kongoese", "Lucchese", "Maltese", "Nankingese",
"Niasese", "Pekingese", "Piedmontese", "Pistoiese", "Sarawakese",
"Shavese", "Vermontese", "Wenchowese", "Yengeese",
]
pl_sb_uninflected_endings = [
# SOME FISH AND HERD ANIMALS
"fish",
"deer", "sheep",
# ALL NATIONALS ENDING IN -ese
"nese", "rese", "lese", "mese",
# DISEASES
"pox",
# OTHER ODDITIES
'craft',
] + pl_sb_uninflected_s_endings
# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE)
pl_sb_uninflected_bysize = bysize(pl_sb_uninflected_endings)
# SINGULAR WORDS ENDING IN ...s (ALL INFLECT WITH ...es)
pl_sb_singular_s_complete = [
"acropolis", "aegis", "alias", "asbestos", "bathos", "bias",
"bronchitis", "bursitis", "caddis", "cannabis",
"canvas", "chaos", "cosmos", "dais", "digitalis",
"epidermis", "ethos", "eyas", "gas", "glottis",
"hubris", "ibis", "lens", "mantis", "marquis", "metropolis",
"pathos", "pelvis", "polis", "rhinoceros",
"sassafras", "trellis",
] + pl_sb_C_is_ides_complete
pl_sb_singular_s_endings = [
"ss", "us",
] + pl_sb_C_is_ides_endings
pl_sb_singular_s_bysize = bysize(pl_sb_singular_s_endings)
si_sb_singular_s_complete = ['%ses' % w for w in pl_sb_singular_s_complete]
si_sb_singular_s_endings = ['%ses' % w for w in pl_sb_singular_s_endings]
si_sb_singular_s_bysize = bysize(si_sb_singular_s_endings)
pl_sb_singular_s_es = [
"[A-Z].*es",
]
pl_sb_singular_s = enclose('|'.join(pl_sb_singular_s_complete +
['.*%s' % w for w in pl_sb_singular_s_endings] +
pl_sb_singular_s_es))
# PLURALS ENDING IN uses -> use
si_sb_ois_oi_case = (
'Bolshois', 'Hanois'
)
si_sb_uses_use_case = (
'Betelgeuses', 'Duses', 'Meuses', 'Syracuses', 'Toulouses',
)
si_sb_uses_use = (
'abuses', 'applauses', 'blouses',
'carouses', 'causes', 'chartreuses', 'clauses',
'contuses', 'douses', 'excuses', 'fuses',
'grouses', 'hypotenuses', 'masseuses',
'menopauses', 'misuses', 'muses', 'overuses', 'pauses',
'peruses', 'profuses', 'recluses', 'reuses',
'ruses', 'souses', 'spouses', 'suffuses', 'transfuses', 'uses',
)
si_sb_ies_ie_case = (
'Addies', 'Aggies', 'Allies', 'Amies', 'Angies', 'Annies',
'Annmaries', 'Archies', 'Arties', 'Aussies', 'Barbies',
'Barries', 'Basies', 'Bennies', 'Bernies', 'Berties', 'Bessies',
'Betties', 'Billies', 'Blondies', 'Bobbies', 'Bonnies',
'Bowies', 'Brandies', 'Bries', 'Brownies', 'Callies',
'Carnegies', 'Carries', 'Cassies', 'Charlies', 'Cheries',
'Christies', 'Connies', 'Curies', 'Dannies', 'Debbies', 'Dixies',
'Dollies', 'Donnies', 'Drambuies', 'Eddies', 'Effies', 'Ellies',
'Elsies', 'Eries', 'Ernies', 'Essies', 'Eugenies', 'Fannies',
'Flossies', 'Frankies', 'Freddies', 'Gillespies', 'Goldies',
'Gracies', 'Guthries', 'Hallies', 'Hatties', 'Hetties',
'Hollies', 'Jackies', 'Jamies', 'Janies', 'Jannies', 'Jeanies',
'Jeannies', 'Jennies', 'Jessies', 'Jimmies', 'Jodies', 'Johnies',
'Johnnies', 'Josies', 'Julies', 'Kalgoorlies', 'Kathies', 'Katies',
'Kellies', 'Kewpies', 'Kristies', 'Laramies', 'Lassies', 'Lauries',
'Leslies', 'Lessies', 'Lillies', 'Lizzies', 'Lonnies', 'Lories',
'Lorries', 'Lotties', 'Louies', 'Mackenzies', 'Maggies', 'Maisies',
'Mamies', 'Marcies', 'Margies', 'Maries', 'Marjories', 'Matties',
'McKenzies', 'Melanies', 'Mickies', 'Millies', 'Minnies', 'Mollies',
'Mounties', 'Nannies', 'Natalies', 'Nellies', 'Netties', 'Ollies',
'Ozzies', 'Pearlies', 'Pottawatomies', 'Reggies', 'Richies', 'Rickies',
'Robbies', 'Ronnies', 'Rosalies', 'Rosemaries', 'Rosies', 'Roxies',
'Rushdies', 'Ruthies', 'Sadies', 'Sallies', 'Sammies', 'Scotties',
'Selassies', 'Sherries', 'Sophies', 'Stacies', 'Stefanies', 'Stephanies',
'Stevies', 'Susies', 'Sylvies', 'Tammies', 'Terries', 'Tessies',
'Tommies', 'Tracies', 'Trekkies', 'Valaries', 'Valeries', 'Valkyries',
'Vickies', 'Virgies', 'Willies', 'Winnies', 'Wylies', 'Yorkies',
)
si_sb_ies_ie = (
'aeries', 'baggies', 'belies', 'biggies', 'birdies', 'bogies',
'bonnies', 'boogies', 'bookies', 'bourgeoisies', 'brownies',
'budgies', 'caddies', 'calories', 'camaraderies', 'cockamamies',
'collies', 'cookies', 'coolies', 'cooties', 'coteries', 'crappies',
'curies', 'cutesies', 'dogies', 'eyrie', 'floozies', 'footsies',
'freebies', 'genies', 'goalies', 'groupies',
'hies', 'jalousies', 'junkies',
'kiddies', 'laddies', 'lassies', 'lies',
'lingeries', 'magpies', 'menageries', 'mommies', 'movies', 'neckties',
'newbies', 'nighties', 'oldies', 'organdies', 'overlies',
'pies', 'pinkies', 'pixies', 'potpies', 'prairies',
'quickies', 'reveries', 'rookies', 'rotisseries', 'softies', 'sorties',
'species', 'stymies', 'sweeties', 'ties', 'underlies', 'unties',
'veggies', 'vies', 'yuppies', 'zombies',
)
si_sb_oes_oe_case = (
'Chloes', 'Crusoes', 'Defoes', 'Faeroes', 'Ivanhoes', 'Joes',
'McEnroes', 'Moes', 'Monroes', 'Noes', 'Poes', 'Roscoes',
'Tahoes', 'Tippecanoes', 'Zoes',
)
si_sb_oes_oe = (
'aloes', 'backhoes', 'canoes',
'does', 'floes', 'foes', 'hoes', 'mistletoes',
'oboes', 'pekoes', 'roes', 'sloes',
'throes', 'tiptoes', 'toes', 'woes',
)
si_sb_z_zes = (
"quartzes", "topazes",
)
si_sb_zzes_zz = (
'buzzes', 'fizzes', 'frizzes', 'razzes'
)
si_sb_ches_che_case = (
'Andromaches', 'Apaches', 'Blanches', 'Comanches',
'Nietzsches', 'Porsches', 'Roches',
)
si_sb_ches_che = (
'aches', 'avalanches', 'backaches', 'bellyaches', 'caches',
'cloches', 'creches', 'douches', 'earaches', 'fiches',
'headaches', 'heartaches', 'microfiches',
'niches', 'pastiches', 'psyches', 'quiches',
'stomachaches', 'toothaches',
)
si_sb_xes_xe = (
'annexes', 'axes', 'deluxes', 'pickaxes',
)
si_sb_sses_sse_case = (
'Hesses', 'Jesses', 'Larousses', 'Matisses',
)
si_sb_sses_sse = (
'bouillabaisses', 'crevasses', 'demitasses', 'impasses',
'mousses', 'posses',
)
si_sb_ves_ve_case = (
# *[nwl]ives -> [nwl]live
'Clives', 'Palmolives',
)
si_sb_ves_ve = (
# *[^d]eaves -> eave
'interweaves', 'weaves',
# *[nwl]ives -> [nwl]live
'olives',
# *[eoa]lves -> [eoa]lve
'bivalves', 'dissolves', 'resolves', 'salves', 'twelves', 'valves',
)
plverb_special_s = enclose('|'.join(
[pl_sb_singular_s] +
pl_sb_uninflected_s +
list(pl_sb_irregular_s.keys()) + [
'(.*[csx])is',
'(.*)ceps',
'[A-Z].*s',
]
))
pl_sb_postfix_adj = {
'general': ['(?!major|lieutenant|brigadier|adjutant|.*star)\S+'],
'martial': ['court'],
}
for k in list(pl_sb_postfix_adj.keys()):
pl_sb_postfix_adj[k] = enclose(
enclose('|'.join(pl_sb_postfix_adj[k])) +
"(?=(?:-|\\s+)%s)" % k)
pl_sb_postfix_adj_stems = '(' + '|'.join(list(pl_sb_postfix_adj.values())) + ')(.*)'
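# Added note (not part of the upstream library): these lookahead patterns pick out
# the noun part of postfix-adjective compounds, e.g. "court" in "court martial" or
# "attorney" in "attorney general", presumably so the noun rather than the
# adjective is pluralised by the routines later in the file.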
# PLURAL WORDS ENDING IN es GO TO SINGULAR is
si_sb_es_is = (
'amanuenses', 'amniocenteses', 'analyses', 'antitheses',
'apotheoses', 'arterioscleroses', 'atheroscleroses', 'axes',
# 'bases', # bases -> basis
'catalyses', 'catharses', 'chasses', 'cirrhoses',
'cocces', 'crises', 'diagnoses', 'dialyses', 'diereses',
'electrolyses', 'emphases', 'exegeses', 'geneses',
'halitoses', 'hydrolyses', 'hypnoses', 'hypotheses', 'hystereses',
'metamorphoses', 'metastases', 'misdiagnoses', 'mitoses',
'mononucleoses', 'narcoses', 'necroses', 'nemeses', 'neuroses',
'oases', 'osmoses', 'osteoporoses', 'paralyses', 'parentheses',
'parthenogeneses', 'periphrases', 'photosyntheses', 'probosces',
'prognoses', 'prophylaxes', 'prostheses', 'preces', 'psoriases',
'psychoanalyses', 'psychokineses', 'psychoses', 'scleroses',
'scolioses', 'sepses', 'silicoses', 'symbioses', 'synopses',
'syntheses', 'taxes', 'telekineses', 'theses', 'thromboses',
'tuberculoses', 'urinalyses',
)
pl_prep_list = """
about above across after among around at athwart before behind
below beneath beside besides between betwixt beyond but by
during except for from in into near of off on onto out over
since till to under until unto upon with""".split()
pl_prep_list_da = pl_prep_list + ['de', 'du', 'da']
pl_prep_bysize = bysize(pl_prep_list_da)
pl_prep = enclose('|'.join(pl_prep_list_da))
pl_sb_prep_dual_compound = r'(.*?)((?:-|\s+)(?:' + pl_prep + r')(?:-|\s+))a(?:-|\s+)(.*)'
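# Added note (not part of the upstream library): this pattern splits dual compounds
# such as "son of a gun" into the groups ('son', ' of ', 'gun'), presumably so that
# both halves can be pluralised ("sons of guns") by code later in the file.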
singular_pronoun_genders = set(['neuter',
'feminine',
'masculine',
'gender-neutral',
'feminine or masculine',
'masculine or feminine'])
pl_pron_nom = {
# NOMINATIVE REFLEXIVE
"i": "we", "myself": "ourselves",
"you": "you", "yourself": "yourselves",
"she": "they", "herself": "themselves",
"he": "they", "himself": "themselves",
"it": "they", "itself": "themselves",
"they": "they", "themself": "themselves",
# POSSESSIVE
"mine": "ours",
"yours": "yours",
"hers": "theirs",
"his": "theirs",
"its": "theirs",
"theirs": "theirs",
}
si_pron = {}
si_pron['nom'] = dict([(v, k) for (k, v) in pl_pron_nom.items()])
si_pron['nom']['we'] = 'I'
pl_pron_acc = {
# ACCUSATIVE REFLEXIVE
"me": "us", "myself": "ourselves",
"you": "you", "yourself": "yourselves",
"her": "them", "herself": "themselves",
"him": "them", "himself": "themselves",
"it": "them", "itself": "themselves",
"them": "them", "themself": "themselves",
}
pl_pron_acc_keys = enclose('|'.join(list(pl_pron_acc.keys())))
pl_pron_acc_keys_bysize = bysize(list(pl_pron_acc.keys()))
si_pron['acc'] = dict([(v, k) for (k, v) in pl_pron_acc.items()])
for thecase, plur, gend, sing in (
('nom', 'they', 'neuter', 'it'),
('nom', 'they', 'feminine', 'she'),
('nom', 'they', 'masculine', 'he'),
('nom', 'they', 'gender-neutral', 'they'),
('nom', 'they', 'feminine or masculine', 'she or he'),
('nom', 'they', 'masculine or feminine', 'he or she'),
('nom', 'themselves', 'neuter', 'itself'),
('nom', 'themselves', 'feminine', 'herself'),
('nom', 'themselves', 'masculine', 'himself'),
('nom', 'themselves', 'gender-neutral', 'themself'),
('nom', 'themselves', 'feminine or masculine', 'herself or himself'),
('nom', 'themselves', 'masculine or feminine', 'himself or herself'),
('nom', 'theirs', 'neuter', 'its'),
('nom', 'theirs', 'feminine', 'hers'),
('nom', 'theirs', 'masculine', 'his'),
('nom', 'theirs', 'gender-neutral', 'theirs'),
('nom', 'theirs', 'feminine or masculine', 'hers or his'),
('nom', 'theirs', 'masculine or feminine', 'his or hers'),
('acc', 'them', 'neuter', 'it'),
('acc', 'them', 'feminine', 'her'),
('acc', 'them', 'masculine', 'him'),
('acc', 'them', 'gender-neutral', 'them'),
('acc', 'them', 'feminine or masculine', 'her or him'),
('acc', 'them', 'masculine or feminine', 'him or her'),
('acc', 'themselves', 'neuter', 'itself'),
('acc', 'themselves', 'feminine', 'herself'),
('acc', 'themselves', 'masculine', 'himself'),
('acc', 'themselves', 'gender-neutral', 'themself'),
('acc', 'themselves', 'feminine or masculine', 'herself or himself'),
('acc', 'themselves', 'masculine or feminine', 'himself or herself'),
):
try:
si_pron[thecase][plur][gend] = sing
except TypeError:
si_pron[thecase][plur] = {}
si_pron[thecase][plur][gend] = sing
si_pron_acc_keys = enclose('|'.join(list(si_pron['acc'].keys())))
si_pron_acc_keys_bysize = bysize(list(si_pron['acc'].keys()))
def get_si_pron(thecase, word, gender):
try:
sing = si_pron[thecase][word]
except KeyError:
raise # not a pronoun
try:
return sing[gender] # has several types due to gender
except TypeError:
return sing # answer independent of gender
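# Examples (added for clarity; not part of the upstream library):
# get_si_pron('nom', 'they', 'feminine') -> 'she'   (gender picks among singulars)
# get_si_pron('acc', 'us', 'feminine')   -> 'me'    (answer independent of gender)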
plverb_irregular_pres = {
# 1st PERS. SING. 2ND PERS. SING. 3RD PERS. SINGULAR
# 3RD PERS. (INDET.)
"am": "are", "are": "are", "is": "are",
"was": "were", "were": "were", "was": "were",
"have": "have", "have": "have", "has": "have",
"do": "do", "do": "do", "does": "do",
}
plverb_ambiguous_pres = {
# 1st PERS. SING. 2ND PERS. SING. 3RD PERS. SINGULAR
# 3RD PERS. (INDET.)
"act": "act", "act": "act", "acts": "act",
"blame": "blame", "blame": "blame", "blames": "blame",
"can": "can", "can": "can", "can": "can",
"must": "must", "must": "must", "must": "must",
"fly": "fly", "fly": "fly", "flies": "fly",
"copy": "copy", "copy": "copy", "copies": "copy",
"drink": "drink", "drink": "drink", "drinks": "drink",
"fight": "fight", "fight": "fight", "fights": "fight",
"fire": "fire", "fire": "fire", "fires": "fire",
"like": "like", "like": "like", "likes": "like",
"look": "look", "look": "look", "looks": "look",
"make": "make", "make": "make", "makes": "make",
"reach": "reach", "reach": "reach", "reaches": "reach",
"run": "run", "run": "run", "runs": "run",
"sink": "sink", "sink": "sink", "sinks": "sink",
"sleep": "sleep", "sleep": "sleep", "sleeps": "sleep",
"view": "view", "view": "view", "views": "view",
}
plverb_ambiguous_pres_keys = enclose('|'.join(list(plverb_ambiguous_pres.keys())))
plverb_irregular_non_pres = (
"did", "had", "ate", "made", "put",
"spent", "fought", "sank", "gave", "sought",
"shall", "could", "ought", "should",
)
plverb_ambiguous_non_pres = enclose('|'.join((
"thought", "saw", "bent", "will", "might", "cut",
)))
# "..oes" -> "..oe" (the rest are "..oes" -> "o")
pl_v_oes_oe = ('canoes', 'floes', 'oboes', 'roes', 'throes', 'woes')
pl_v_oes_oe_endings_size4 = ('hoes', 'toes')
pl_v_oes_oe_endings_size5 = ('shoes',)
pl_count_zero = (
"0", "no", "zero", "nil"
)
pl_count_one = (
"1", "a", "an", "one", "each", "every", "this", "that",
)
pl_adj_special = {
"a": "some", "an": "some",
"this": "these", "that": "those",
}
pl_adj_special_keys = enclose('|'.join(list(pl_adj_special.keys())))
pl_adj_poss = {
"my": "our",
"your": "your",
"its": "their",
"her": "their",
"his": "their",
"their": "their",
}
pl_adj_poss_keys = enclose('|'.join(list(pl_adj_poss.keys())))
# 2. INDEFINITE ARTICLES
# THIS PATTERN MATCHES STRINGS OF CAPITALS STARTING WITH A "VOWEL-SOUND"
# CONSONANT FOLLOWED BY ANOTHER CONSONANT, AND WHICH ARE NOT LIKELY
# TO BE REAL WORDS (OH, ALL RIGHT THEN, IT'S JUST MAGIC!)
A_abbrev = r"""
(?! FJO | [HLMNS]Y. | RY[EO] | SQU
| ( F[LR]? | [HL] | MN? | N | RH? | S[CHKLMNPTVW]? | X(YL)?) [AEIOU])
[FHLMNRSX][A-Z]
"""
# THIS PATTERN CODES THE BEGINNINGS OF ALL ENGLISH WORDS BEGINNING WITH A
# 'y' FOLLOWED BY A CONSONANT. ANY OTHER Y-CONSONANT PREFIX THEREFORE
# IMPLIES AN ABBREVIATION.
A_y_cons = 'y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)'
# EXCEPTIONS TO EXCEPTIONS
A_explicit_a = enclose('|'.join((
"unabomber", "unanimous", "US",
)))
A_explicit_an = enclose('|'.join((
"euler",
"hour(?!i)", "heir", "honest", "hono[ur]",
"mpeg",
)))
A_ordinal_an = enclose('|'.join((
"[aefhilmnorsx]-?th",
)))
A_ordinal_a = enclose('|'.join((
"[bcdgjkpqtuvwyz]-?th",
)))
# NUMERICAL INFLECTIONS
nth = {
0: 'th',
1: 'st',
2: 'nd',
3: 'rd',
4: 'th',
5: 'th',
6: 'th',
7: 'th',
8: 'th',
9: 'th',
11: 'th',
12: 'th',
13: 'th',
}
ordinal = dict(ty='tieth',
one='first',
two='second',
three='third',
five='fifth',
eight='eighth',
nine='ninth',
twelve='twelfth')
ordinal_suff = '|'.join(list(ordinal.keys()))
# NUMBERS
unit = ['', 'one', 'two', 'three', 'four', 'five',
'six', 'seven', 'eight', 'nine']
teen = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']
ten = ['', '', 'twenty', 'thirty', 'forty',
'fifty', 'sixty', 'seventy', 'eighty', 'ninety']
mill = [' ', ' thousand', ' million', ' billion', ' trillion', ' quadrillion',
' quintillion', ' sextillion', ' septillion', ' octillion',
' nonillion', ' decillion']
# SUPPORT CLASSICAL PLURALIZATIONS
def_classical = dict(
all=False,
zero=False,
herd=False,
names=True,
persons=False,
ancient=False,
)
all_classical = dict((k, True) for k in list(def_classical.keys()))
no_classical = dict((k, False) for k in list(def_classical.keys()))
# TODO: .inflectrc file does not work
# can't just execute methods from another file like this
# for rcfile in (pathjoin(dirname(__file__), '.inflectrc'),
# expanduser(pathjoin(('~'), '.inflectrc'))):
# if isfile(rcfile):
# try:
# execfile(rcfile)
# except:
# print3("\nBad .inflectrc file (%s):\n" % rcfile)
# raise BadRcFileError
class engine:
def __init__(self):
self.classical_dict = def_classical.copy()
self.persistent_count = None
self.mill_count = 0
self.pl_sb_user_defined = []
self.pl_v_user_defined = []
self.pl_adj_user_defined = []
self.si_sb_user_defined = []
self.A_a_user_defined = []
self.thegender = 'neuter'
deprecated_methods = dict(pl='plural',
plnoun='plural_noun',
plverb='plural_verb',
pladj='plural_adj',
                              sinoun='singular_noun',
prespart='present_participle',
numwords='number_to_words',
plequal='compare',
plnounequal='compare_nouns',
plverbequal='compare_verbs',
pladjequal='compare_adjs',
wordlist='join',
)
def __getattr__(self, meth):
if meth in self.deprecated_methods:
print3('%s() deprecated, use %s()' % (meth, self.deprecated_methods[meth]))
raise DeprecationWarning
raise AttributeError
def defnoun(self, singular, plural):
'''
Set the noun plural of singular to plural.
'''
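        # Illustrative usage (a sketch; 'cow'/'kine' is just an example pair,
        # expected outputs are not guaranteed for every word list):
        # >>> p = engine()
        # >>> p.defnoun('cow', 'kine')
        # >>> p.plural('cow')          # expected: 'kine'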
self.checkpat(singular)
self.checkpatplural(plural)
self.pl_sb_user_defined.extend((singular, plural))
self.si_sb_user_defined.extend((plural, singular))
return 1
def defverb(self, s1, p1, s2, p2, s3, p3):
'''
Set the verb plurals for s1, s2 and s3 to p1, p2 and p3 respectively.
Where 1, 2 and 3 represent the 1st, 2nd and 3rd person forms of the verb.
'''
self.checkpat(s1)
self.checkpat(s2)
self.checkpat(s3)
self.checkpatplural(p1)
self.checkpatplural(p2)
self.checkpatplural(p3)
self.pl_v_user_defined.extend((s1, p1, s2, p2, s3, p3))
return 1
def defadj(self, singular, plural):
'''
Set the adjective plural of singular to plural.
'''
self.checkpat(singular)
self.checkpatplural(plural)
self.pl_adj_user_defined.extend((singular, plural))
return 1
def defa(self, pattern):
'''
        Define the indefinite article as 'a' for words matching pattern.
'''
self.checkpat(pattern)
self.A_a_user_defined.extend((pattern, 'a'))
return 1
def defan(self, pattern):
'''
        Define the indefinite article as 'an' for words matching pattern.
'''
self.checkpat(pattern)
self.A_a_user_defined.extend((pattern, 'an'))
return 1
def checkpat(self, pattern):
'''
check for errors in a regex pattern
'''
if pattern is None:
return
try:
match(pattern, '')
except reerror:
print3("\nBad user-defined singular pattern:\n\t%s\n" % pattern)
raise BadUserDefinedPatternError
def checkpatplural(self, pattern):
'''
check for errors in a regex replace pattern
'''
return
# can't find a pattern that doesn't pass the following test:
# if pattern is None:
# return
# try:
# resub('', pattern, '')
# except reerror:
# print3("\nBad user-defined plural pattern:\n\t%s\n" % pattern)
# raise BadUserDefinedPatternError
def ud_match(self, word, wordlist):
for i in range(len(wordlist) - 2, -2, -2): # backwards through even elements
mo = search(r'^%s$' % wordlist[i], word, IGNORECASE)
if mo:
if wordlist[i + 1] is None:
return None
pl = resub(r'\$(\d+)', r'\\1', wordlist[i + 1]) # change $n to \n for expand
return mo.expand(pl)
return None
def classical(self, **kwargs):
"""
turn classical mode on and off for various categories
turn on all classical modes:
classical()
classical(all=True)
        turn on or off specific classical modes:
e.g.
classical(herd=True)
classical(names=False)
By default all classical modes are off except names.
        unknown value in args or key in kwargs raises exception: UnknownClassicalModeError
"""
classical_mode = list(def_classical.keys())
if not kwargs:
self.classical_dict = all_classical.copy()
return
if 'all' in kwargs:
if kwargs['all']:
self.classical_dict = all_classical.copy()
else:
self.classical_dict = no_classical.copy()
for k, v in list(kwargs.items()):
if k in classical_mode:
self.classical_dict[k] = v
else:
raise UnknownClassicalModeError
def num(self, count=None, show=None): # (;$count,$show)
'''
Set the number to be used in other method calls.
Returns count.
Set show to False to return '' instead.
'''
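        # Illustrative usage (a sketch; expected behaviour of the persistent count):
        # >>> p = engine()
        # >>> p.num(2)                 # returns '2' and sets the persistent count
        # >>> p.plural('cat')          # expected: 'cats'
        # >>> p.num(1)
        # >>> p.plural('cat')          # expected: 'cat'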
if count is not None:
try:
self.persistent_count = int(count)
except ValueError:
raise BadNumValueError
if (show is None) or show:
return str(count)
else:
self.persistent_count = None
return ''
def gender(self, gender):
'''
set the gender for the singular of plural pronouns
can be one of:
'neuter' ('they' -> 'it')
'feminine' ('they' -> 'she')
'masculine' ('they' -> 'he')
'gender-neutral' ('they' -> 'they')
'feminine or masculine' ('they' -> 'she or he')
'masculine or feminine' ('they' -> 'he or she')
'''
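        # Illustrative usage (a sketch; relies on the pronoun tables above):
        # >>> p = engine()
        # >>> p.singular_noun('they')  # expected: 'it' (default gender is neuter)
        # >>> p.gender('feminine')
        # >>> p.singular_noun('they')  # expected: 'she'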
if gender in singular_pronoun_genders:
self.thegender = gender
else:
raise BadGenderError
def nummo(self, matchobject):
'''
num but take a matchobject
use groups 1 and 2 in matchobject
'''
return self.num(matchobject.group(1), matchobject.group(2))
def plmo(self, matchobject):
'''
plural but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.plural(matchobject.group(1), matchobject.group(3))
def plnounmo(self, matchobject):
'''
plural_noun but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.plural_noun(matchobject.group(1), matchobject.group(3))
def plverbmo(self, matchobject):
'''
plural_verb but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.plural_verb(matchobject.group(1), matchobject.group(3))
def pladjmo(self, matchobject):
'''
plural_adj but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.plural_adj(matchobject.group(1), matchobject.group(3))
def sinounmo(self, matchobject):
'''
singular_noun but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.singular_noun(matchobject.group(1), matchobject.group(3))
def amo(self, matchobject):
'''
A but take a matchobject
use groups 1 and 3 in matchobject
'''
if matchobject.group(3) is None:
return self.a(matchobject.group(1))
return self.a(matchobject.group(1), matchobject.group(3))
def nomo(self, matchobject):
'''
NO but take a matchobject
use groups 1 and 3 in matchobject
'''
return self.no(matchobject.group(1), matchobject.group(3))
def ordinalmo(self, matchobject):
'''
ordinal but take a matchobject
use group 1
'''
return self.ordinal(matchobject.group(1))
def numwordsmo(self, matchobject):
'''
number_to_words but take a matchobject
use group 1
'''
return self.number_to_words(matchobject.group(1))
def prespartmo(self, matchobject):
'''
prespart but take a matchobject
use group 1
'''
return self.present_participle(matchobject.group(1))
# 0. PERFORM GENERAL INFLECTIONS IN A STRING
def inflect(self, text):
'''
Perform inflections in a string.
e.g. inflect('The plural of cat is plural(cat)') returns
'The plural of cat is cats'
can use plural, plural_noun, plural_verb, plural_adj, singular_noun, a, an, no, ordinal,
number_to_words and prespart
'''
save_persistent_count = self.persistent_count
sections = splitre(r"(num\([^)]*\))", text)
inflection = []
for section in sections:
(section, count) = subn(r"num\(\s*?(?:([^),]*)(?:,([^)]*))?)?\)", self.nummo, section)
if not count:
total = -1
while total:
(section, total) = subn(
r"(?x)\bplural \( ([^),]*) (, ([^)]*) )? \) ",
self.plmo, section)
(section, count) = subn(
r"(?x)\bplural_noun \( ([^),]*) (, ([^)]*) )? \) ",
self.plnounmo, section)
total += count
(section, count) = subn(
r"(?x)\bplural_verb \( ([^),]*) (, ([^)]*) )? \) ",
self.plverbmo, section)
total += count
(section, count) = subn(
r"(?x)\bplural_adj \( ([^),]*) (, ([^)]*) )? \) ",
self.pladjmo, section)
total += count
(section, count) = subn(
r"(?x)\bsingular_noun \( ([^),]*) (, ([^)]*) )? \) ",
self.sinounmo, section)
total += count
(section, count) = subn(
r"(?x)\ban? \( ([^),]*) (, ([^)]*) )? \) ",
self.amo, section)
total += count
(section, count) = subn(
r"(?x)\bno \( ([^),]*) (, ([^)]*) )? \) ",
self.nomo, section)
total += count
(section, count) = subn(
r"(?x)\bordinal \( ([^)]*) \) ",
self.ordinalmo, section)
total += count
(section, count) = subn(
r"(?x)\bnumber_to_words \( ([^)]*) \) ",
self.numwordsmo, section)
total += count
(section, count) = subn(
r"(?x)\bpresent_participle \( ([^)]*) \) ",
self.prespartmo, section)
total += count
inflection.append(section)
self.persistent_count = save_persistent_count
return "".join(inflection)
# ## PLURAL SUBROUTINES
def postprocess(self, orig, inflected):
"""
FIX PEDANTRY AND CAPITALIZATION :-)
"""
if '|' in inflected:
inflected = inflected.split('|')[self.classical_dict['all']]
if orig == "I":
return inflected
if orig == orig.upper():
return inflected.upper()
if orig[0] == orig[0].upper():
return '%s%s' % (inflected[0].upper(),
inflected[1:])
return inflected
def partition_word(self, text):
mo = search(r'\A(\s*)(.+?)(\s*)\Z', text)
try:
return mo.group(1), mo.group(2), mo.group(3)
except AttributeError: # empty string
return '', '', ''
# def pl(self, *args, **kwds):
# print 'pl() deprecated, use plural()'
# raise DeprecationWarning
# return self.plural(*args, **kwds)
#
# def plnoun(self, *args, **kwds):
# print 'plnoun() deprecated, use plural_noun()'
# raise DeprecationWarning
# return self.plural_noun(*args, **kwds)
#
# def plverb(self, *args, **kwds):
# print 'plverb() deprecated, use plural_verb()'
# raise DeprecationWarning
# return self.plural_verb(*args, **kwds)
#
# def pladj(self, *args, **kwds):
# print 'pladj() deprecated, use plural_adj()'
# raise DeprecationWarning
# return self.plural_adj(*args, **kwds)
#
# def sinoun(self, *args, **kwds):
# print 'sinoun() deprecated, use singular_noun()'
# raise DeprecationWarning
# return self.singular_noun(*args, **kwds)
#
# def prespart(self, *args, **kwds):
# print 'prespart() deprecated, use present_participle()'
# raise DeprecationWarning
# return self.present_participle(*args, **kwds)
#
# def numwords(self, *args, **kwds):
# print 'numwords() deprecated, use number_to_words()'
# raise DeprecationWarning
# return self.number_to_words(*args, **kwds)
def plural(self, text, count=None):
'''
Return the plural of text.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
'''
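        # Illustrative examples (a sketch; expected outputs under the default
        # word lists):
        # >>> p = engine()
        # >>> p.plural('cat')          # expected: 'cats'
        # >>> p.plural('child')        # expected: 'children'
        # >>> p.plural('is')           # expected: 'are' (verbs are pluralized too)
        # >>> p.plural('cat', 1)       # expected: 'cat'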
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(
word,
self._pl_special_adjective(word, count) or
self._pl_special_verb(word, count) or
self._plnoun(word, count))
return "%s%s%s" % (pre, plural, post)
def plural_noun(self, text, count=None):
'''
Return the plural of text, where text is a noun.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
'''
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._plnoun(word, count))
return "%s%s%s" % (pre, plural, post)
def plural_verb(self, text, count=None):
'''
Return the plural of text, where text is a verb.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
'''
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._pl_special_verb(word, count) or
self._pl_general_verb(word, count))
return "%s%s%s" % (pre, plural, post)
def plural_adj(self, text, count=None):
'''
Return the plural of text, where text is an adjective.
If count supplied, then return text if count is one of:
1, a, an, one, each, every, this, that
otherwise return the plural.
Whitespace at the start and end is preserved.
'''
pre, word, post = self.partition_word(text)
if not word:
return text
plural = self.postprocess(word, self._pl_special_adjective(word, count) or word)
return "%s%s%s" % (pre, plural, post)
def compare(self, word1, word2):
'''
compare word1 and word2 for equality regardless of plurality
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
'''
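        # Illustrative examples (a sketch; expected return values):
        # >>> p = engine()
        # >>> p.compare('cat', 'cats') # expected: 's:p'
        # >>> p.compare('cats', 'cat') # expected: 'p:s'
        # >>> p.compare('cat', 'cat')  # expected: 'eq'
        # >>> p.compare('cat', 'dog')  # expected: False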
return (
self._plequal(word1, word2, self.plural_noun) or
self._plequal(word1, word2, self.plural_verb) or
self._plequal(word1, word2, self.plural_adj))
def compare_nouns(self, word1, word2):
'''
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as nouns
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
'''
return self._plequal(word1, word2, self.plural_noun)
def compare_verbs(self, word1, word2):
'''
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as verbs
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
'''
return self._plequal(word1, word2, self.plural_verb)
def compare_adjs(self, word1, word2):
'''
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as adjectives
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
'''
return self._plequal(word1, word2, self.plural_adj)
def singular_noun(self, text, count=None, gender=None):
'''
Return the singular of text, where text is a plural noun.
If count supplied, then return the singular if count is one of:
1, a, an, one, each, every, this, that or if count is None
otherwise return text unchanged.
Whitespace at the start and end is preserved.
'''
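        # Illustrative examples (a sketch; expected outputs under the default
        # word lists):
        # >>> p = engine()
        # >>> p.singular_noun('cats')  # expected: 'cat'
        # >>> p.singular_noun('mice')  # expected: 'mouse'
        # >>> p.singular_noun('cat')   # expected: False (not recognised as a plural)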
pre, word, post = self.partition_word(text)
if not word:
return text
sing = self._sinoun(word, count=count, gender=gender)
if sing is not False:
plural = self.postprocess(word, self._sinoun(word, count=count, gender=gender))
return "%s%s%s" % (pre, plural, post)
return False
def _plequal(self, word1, word2, pl):
classval = self.classical_dict.copy()
self.classical_dict = all_classical.copy()
if word1 == word2:
return "eq"
if word1 == pl(word2):
return "p:s"
if pl(word1) == word2:
return "s:p"
self.classical_dict = no_classical.copy()
if word1 == pl(word2):
return "p:s"
if pl(word1) == word2:
return "s:p"
self.classical_dict = classval.copy()
if pl == self.plural or pl == self.plural_noun:
if self._pl_check_plurals_N(word1, word2):
return "p:p"
if self._pl_check_plurals_N(word2, word1):
return "p:p"
if pl == self.plural or pl == self.plural_adj:
if self._pl_check_plurals_adj(word1, word2):
return "p:p"
return False
def _pl_reg_plurals(self, pair, stems, end1, end2):
if search(r"(%s)(%s\|\1%s|%s\|\1%s)" % (stems, end1, end2, end2, end1), pair):
return True
return False
def _pl_check_plurals_N(self, word1, word2):
pair = "%s|%s" % (word1, word2)
if pair in list(pl_sb_irregular_s.values()):
return True
if pair in list(pl_sb_irregular.values()):
return True
if pair in list(pl_sb_irregular_caps.values()):
return True
for (stems, end1, end2) in (
(pl_sb_C_a_ata, "as", "ata"),
(pl_sb_C_is_ides, "is", "ides"),
(pl_sb_C_a_ae, "s", "e"),
(pl_sb_C_en_ina, "ens", "ina"),
(pl_sb_C_um_a, "ums", "a"),
(pl_sb_C_us_i, "uses", "i"),
(pl_sb_C_on_a, "ons", "a"),
(pl_sb_C_o_i_stems, "os", "i"),
(pl_sb_C_ex_ices, "exes", "ices"),
(pl_sb_C_ix_ices, "ixes", "ices"),
(pl_sb_C_i, "s", "i"),
(pl_sb_C_im, "s", "im"),
('.*eau', "s", "x"),
('.*ieu', "s", "x"),
('.*tri', "xes", "ces"),
('.{2,}[yia]n', "xes", "ges")
):
if self._pl_reg_plurals(pair, stems, end1, end2):
return True
return False
def _pl_check_plurals_adj(self, word1, word2):
# VERSION: tuple in endswith requires python 2.5
word1a = word1[:word1.rfind("'")] if word1.endswith(("'s", "'")) else ''
word2a = word2[:word2.rfind("'")] if word2.endswith(("'s", "'")) else ''
# TODO: BUG? report upstream. I don't think you should chop off the s'
# word1b = word1[:-2] if word1.endswith("s'") else ''
# word2b = word2[:-2] if word2.endswith("s'") else ''
# TODO: dresses', dresses's -> dresses, dresses when chop off letters
# then they return False because they are the same. Need to fix this.
if word1a:
if word2a and (self._pl_check_plurals_N(word1a, word2a)
or self._pl_check_plurals_N(word2a, word1a)):
return True
# if word2b and ( self._pl_check_plurals_N(word1a, word2b)
# or self._pl_check_plurals_N(word2b, word1a) ):
# return True
# if word1b:
# if word2a and ( self._pl_check_plurals_N(word1b, word2a)
# or self._pl_check_plurals_N(word2a, word1b) ):
# return True
# if word2b and ( self._pl_check_plurals_N(word1b, word2b)
# or self._pl_check_plurals_N(word2b, word1b) ):
# return True
return False
def get_count(self, count=None):
if count is None and self.persistent_count is not None:
count = self.persistent_count
if count is not None:
count = 1 if ((str(count) in pl_count_one) or
(self.classical_dict['zero'] and str(count).lower() in pl_count_zero)) else 2
else:
count = ''
return count
# @profile
def _plnoun(self, word, count=None):
count = self.get_count(count)
# DEFAULT TO PLURAL
if count == 1:
return word
# HANDLE USER-DEFINED NOUNS
value = self.ud_match(word, self.pl_sb_user_defined)
if value is not None:
return value
# HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS
if word == '':
return word
lowerword = word.lower()
if lowerword in pl_sb_uninflected_complete:
return word
if word in pl_sb_uninflected_caps:
return word
for k, v in pl_sb_uninflected_bysize.items():
if lowerword[-k:] in v:
return word
if (self.classical_dict['herd'] and lowerword in pl_sb_uninflected_herd):
return word
# HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.)
mo = search(r"^(?:%s)$" % pl_sb_postfix_adj_stems, word, IGNORECASE)
if mo and mo.group(2) != '':
return "%s%s" % (self._plnoun(mo.group(1), 2), mo.group(2))
if ' a ' in lowerword or '-a-' in lowerword:
mo = search(r"^(?:%s)$" % pl_sb_prep_dual_compound, word, IGNORECASE)
if mo and mo.group(2) != '' and mo.group(3) != '':
return "%s%s%s" % (self._plnoun(mo.group(1), 2),
mo.group(2),
self._plnoun(mo.group(3)))
lowersplit = lowerword.split(' ')
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return ' '.join(
lowersplit[:numword - 1] +
[self._plnoun(lowersplit[numword - 1], 2)] + lowersplit[numword:])
lowersplit = lowerword.split('-')
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return ' '.join(
lowersplit[:numword - 1] +
[self._plnoun(lowersplit[numword - 1], 2) +
'-' + lowersplit[numword] + '-']) + ' '.join(lowersplit[(numword + 1):])
# HANDLE PRONOUNS
for k, v in pl_pron_acc_keys_bysize.items():
            if lowerword[-k:] in v: # ends with accusative pronoun
for pk, pv in pl_prep_bysize.items():
if lowerword[:pk] in pv: # starts with a prep
if lowerword.split() == [lowerword[:pk], lowerword[-k:]]: # only whitespace in between
return lowerword[:-k] + pl_pron_acc[lowerword[-k:]]
try:
return pl_pron_nom[word.lower()]
except KeyError:
pass
try:
return pl_pron_acc[word.lower()]
except KeyError:
pass
# HANDLE ISOLATED IRREGULAR PLURALS
wordsplit = word.split()
wordlast = wordsplit[-1]
lowerwordlast = wordlast.lower()
if wordlast in list(pl_sb_irregular_caps.keys()):
llen = len(wordlast)
return '%s%s' % (word[:-llen],
pl_sb_irregular_caps[wordlast])
if lowerwordlast in list(pl_sb_irregular.keys()):
llen = len(lowerwordlast)
return '%s%s' % (word[:-llen],
pl_sb_irregular[lowerwordlast])
if (' '.join(wordsplit[-2:])).lower() in list(pl_sb_irregular_compound.keys()):
llen = len(' '.join(wordsplit[-2:])) # TODO: what if 2 spaces between these words?
return '%s%s' % (word[:-llen],
pl_sb_irregular_compound[(' '.join(wordsplit[-2:])).lower()])
if lowerword[-3:] == 'quy':
return word[:-1] + 'ies'
if lowerword[-6:] == 'person':
if self.classical_dict['persons']:
return word + 's'
else:
return word[:-4] + 'ople'
# HANDLE FAMILIES OF IRREGULAR PLURALS
if lowerword[-3:] == 'man':
for k, v in pl_sb_U_man_mans_bysize.items():
if lowerword[-k:] in v:
return word + 's'
for k, v in pl_sb_U_man_mans_caps_bysize.items():
if word[-k:] in v:
return word + 's'
return word[:-3] + 'men'
if lowerword[-5:] == 'mouse':
return word[:-5] + 'mice'
if lowerword[-5:] == 'louse':
return word[:-5] + 'lice'
if lowerword[-5:] == 'goose':
return word[:-5] + 'geese'
if lowerword[-5:] == 'tooth':
return word[:-5] + 'teeth'
if lowerword[-4:] == 'foot':
return word[:-4] + 'feet'
if lowerword == 'die':
return 'dice'
# HANDLE UNASSIMILATED IMPORTS
if lowerword[-4:] == 'ceps':
return word
if lowerword[-4:] == 'zoon':
return word[:-2] + 'a'
if lowerword[-3:] in ('cis', 'sis', 'xis'):
return word[:-2] + 'es'
for lastlet, d, numend, post in (
('h', pl_sb_U_ch_chs_bysize, None, 's'),
('x', pl_sb_U_ex_ices_bysize, -2, 'ices'),
('x', pl_sb_U_ix_ices_bysize, -2, 'ices'),
('m', pl_sb_U_um_a_bysize, -2, 'a'),
('s', pl_sb_U_us_i_bysize, -2, 'i'),
('n', pl_sb_U_on_a_bysize, -2, 'a'),
('a', pl_sb_U_a_ae_bysize, None, 'e'),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
# HANDLE INCOMPLETELY ASSIMILATED IMPORTS
if (self.classical_dict['ancient']):
if lowerword[-4:] == 'trix':
return word[:-1] + 'ces'
if lowerword[-3:] in ('eau', 'ieu'):
return word + 'x'
if lowerword[-3:] in ('ynx', 'inx', 'anx') and len(word) > 4:
return word[:-1] + 'ges'
for lastlet, d, numend, post in (
('n', pl_sb_C_en_ina_bysize, -2, 'ina'),
('x', pl_sb_C_ex_ices_bysize, -2, 'ices'),
('x', pl_sb_C_ix_ices_bysize, -2, 'ices'),
('m', pl_sb_C_um_a_bysize, -2, 'a'),
('s', pl_sb_C_us_i_bysize, -2, 'i'),
('s', pl_sb_C_us_us_bysize, None, ''),
('a', pl_sb_C_a_ae_bysize, None, 'e'),
('a', pl_sb_C_a_ata_bysize, None, 'ta'),
('s', pl_sb_C_is_ides_bysize, -1, 'des'),
('o', pl_sb_C_o_i_bysize, -1, 'i'),
('n', pl_sb_C_on_a_bysize, -2, 'a'),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
for d, numend, post in (
(pl_sb_C_i_bysize, None, 'i'),
(pl_sb_C_im_bysize, None, 'im'),
):
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
        # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SIBILANTS
if lowerword in pl_sb_singular_s_complete:
return word + 'es'
for k, v in pl_sb_singular_s_bysize.items():
if lowerword[-k:] in v:
return word + 'es'
if lowerword[-2:] == 'es' and word[0] == word[0].upper():
return word + 'es'
# Wouldn't special words
# ending with 's' always have been caught, regardless of them starting
# with a capital letter (i.e. being names)
# It makes sense below to do this for words ending in 'y' so that
# Sally -> Sallys. But not sure it makes sense here. Where is the case
# of a word ending in s that is caught here and would otherwise have been
# caught below?
#
# removing it as I can't find a case that executes it
# TODO: check this again
#
# if (self.classical_dict['names']):
# mo = search(r"([A-Z].*s)$", word)
# if mo:
# return "%ses" % mo.group(1)
if lowerword[-1] == 'z':
for k, v in pl_sb_z_zes_bysize.items():
if lowerword[-k:] in v:
return word + 'es'
if lowerword[-2:-1] != 'z':
return word + 'zes'
if lowerword[-2:] == 'ze':
for k, v in pl_sb_ze_zes_bysize.items():
if lowerword[-k:] in v:
return word + 's'
if lowerword[-2:] in ('ch', 'sh', 'zz', 'ss') or lowerword[-1] == 'x':
return word + 'es'
# ## (r"(.*)(us)$", "%s%ses"), TODO: why is this commented?
# HANDLE ...f -> ...ves
if lowerword[-3:] in ('elf', 'alf', 'olf'):
return word[:-1] + 'ves'
if lowerword[-3:] == 'eaf' and lowerword[-4:-3] != 'd':
return word[:-1] + 'ves'
if lowerword[-4:] in ('nife', 'life', 'wife'):
return word[:-2] + 'ves'
if lowerword[-3:] == 'arf':
return word[:-1] + 'ves'
# HANDLE ...y
if lowerword[-1] == 'y':
if lowerword[-2:-1] in 'aeiou' or len(word) == 1:
return word + 's'
if (self.classical_dict['names']):
if lowerword[-1] == 'y' and word[0] == word[0].upper():
return word + 's'
return word[:-1] + 'ies'
# HANDLE ...o
if lowerword in pl_sb_U_o_os_complete:
return word + 's'
for k, v in pl_sb_U_o_os_bysize.items():
if lowerword[-k:] in v:
return word + 's'
if lowerword[-2:] in ('ao', 'eo', 'io', 'oo', 'uo'):
return word + 's'
if lowerword[-1] == 'o':
return word + 'es'
# OTHERWISE JUST ADD ...s
return "%ss" % word
def _pl_special_verb(self, word, count=None):
if (self.classical_dict['zero'] and
str(count).lower() in pl_count_zero):
return False
count = self.get_count(count)
if count == 1:
return word
# HANDLE USER-DEFINED VERBS
value = self.ud_match(word, self.pl_v_user_defined)
if value is not None:
return value
# HANDLE IRREGULAR PRESENT TENSE (SIMPLE AND COMPOUND)
lowerword = word.lower()
try:
firstword = lowerword.split()[0]
except IndexError:
return False # word is ''
if firstword in list(plverb_irregular_pres.keys()):
return "%s%s" % (plverb_irregular_pres[firstword], word[len(firstword):])
# HANDLE IRREGULAR FUTURE, PRETERITE AND PERFECT TENSES
if firstword in plverb_irregular_non_pres:
return word
# HANDLE PRESENT NEGATIONS (SIMPLE AND COMPOUND)
if firstword.endswith("n't") and firstword[:-3] in list(plverb_irregular_pres.keys()):
return "%sn't%s" % (plverb_irregular_pres[firstword[:-3]], word[len(firstword):])
if firstword.endswith("n't"):
return word
# HANDLE SPECIAL CASES
mo = search(r"^(%s)$" % plverb_special_s, word)
if mo:
return False
if search(r"\s", word):
return False
if lowerword == 'quizzes':
return 'quiz'
# HANDLE STANDARD 3RD PERSON (CHOP THE ...(e)s OFF SINGLE WORDS)
if lowerword[-4:] in ('ches', 'shes', 'zzes', 'sses') or \
lowerword[-3:] == 'xes':
return word[:-2]
# # mo = search(r"^(.*)([cs]h|[x]|zz|ss)es$",
# # word, IGNORECASE)
# # if mo:
# # return "%s%s" % (mo.group(1), mo.group(2))
if lowerword[-3:] == 'ies' and len(word) > 3:
return lowerword[:-3] + 'y'
if (lowerword in pl_v_oes_oe or
lowerword[-4:] in pl_v_oes_oe_endings_size4 or
lowerword[-5:] in pl_v_oes_oe_endings_size5):
return word[:-1]
if lowerword.endswith('oes') and len(word) > 3:
return lowerword[:-2]
mo = search(r"^(.*[^s])s$", word, IGNORECASE)
if mo:
return mo.group(1)
# OTHERWISE, A REGULAR VERB (HANDLE ELSEWHERE)
return False
def _pl_general_verb(self, word, count=None):
count = self.get_count(count)
if count == 1:
return word
# HANDLE AMBIGUOUS PRESENT TENSES (SIMPLE AND COMPOUND)
mo = search(r"^(%s)((\s.*)?)$" % plverb_ambiguous_pres_keys, word, IGNORECASE)
if mo:
return "%s%s" % (plverb_ambiguous_pres[mo.group(1).lower()], mo.group(2))
# HANDLE AMBIGUOUS PRETERITE AND PERFECT TENSES
mo = search(r"^(%s)((\s.*)?)$" % plverb_ambiguous_non_pres, word, IGNORECASE)
if mo:
return word
# OTHERWISE, 1st OR 2ND PERSON IS UNINFLECTED
return word
def _pl_special_adjective(self, word, count=None):
count = self.get_count(count)
if count == 1:
return word
# HANDLE USER-DEFINED ADJECTIVES
value = self.ud_match(word, self.pl_adj_user_defined)
if value is not None:
return value
# HANDLE KNOWN CASES
mo = search(r"^(%s)$" % pl_adj_special_keys,
word, IGNORECASE)
if mo:
return "%s" % (pl_adj_special[mo.group(1).lower()])
# HANDLE POSSESSIVES
mo = search(r"^(%s)$" % pl_adj_poss_keys,
word, IGNORECASE)
if mo:
return "%s" % (pl_adj_poss[mo.group(1).lower()])
mo = search(r"^(.*)'s?$",
word)
if mo:
pl = self.plural_noun(mo.group(1))
trailing_s = "" if pl[-1] == 's' else "s"
return "%s'%s" % (pl, trailing_s)
# OTHERWISE, NO IDEA
return False
# @profile
def _sinoun(self, word, count=None, gender=None):
count = self.get_count(count)
# DEFAULT TO PLURAL
if count == 2:
return word
# SET THE GENDER
try:
if gender is None:
gender = self.thegender
elif gender not in singular_pronoun_genders:
raise BadGenderError
except (TypeError, IndexError):
raise BadGenderError
# HANDLE USER-DEFINED NOUNS
value = self.ud_match(word, self.si_sb_user_defined)
if value is not None:
return value
# HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS
if word == '':
return word
lowerword = word.lower()
if word in si_sb_ois_oi_case:
return word[:-1]
if lowerword in pl_sb_uninflected_complete:
return word
if word in pl_sb_uninflected_caps:
return word
for k, v in pl_sb_uninflected_bysize.items():
if lowerword[-k:] in v:
return word
if (self.classical_dict['herd'] and lowerword in pl_sb_uninflected_herd):
return word
# HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.)
mo = search(r"^(?:%s)$" % pl_sb_postfix_adj_stems, word, IGNORECASE)
if mo and mo.group(2) != '':
return "%s%s" % (self._sinoun(mo.group(1), 1, gender=gender), mo.group(2))
# how to reverse this one?
# mo = search(r"^(?:%s)$" % pl_sb_prep_dual_compound, word, IGNORECASE)
# if mo and mo.group(2) != '' and mo.group(3) != '':
# return "%s%s%s" % (self._sinoun(mo.group(1), 1),
# mo.group(2),
# self._sinoun(mo.group(3), 1))
lowersplit = lowerword.split(' ')
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return ' '.join(lowersplit[:numword - 1] +
[self._sinoun(lowersplit[numword - 1], 1, gender=gender) or
lowersplit[numword - 1]] + lowersplit[numword:])
lowersplit = lowerword.split('-')
if len(lowersplit) >= 3:
for numword in range(1, len(lowersplit) - 1):
if lowersplit[numword] in pl_prep_list_da:
return ' '.join(
lowersplit[:numword - 1] +
[(self._sinoun(lowersplit[numword - 1], 1, gender=gender) or lowersplit[numword - 1]) +
'-' + lowersplit[numword] + '-']) + ' '.join(lowersplit[(numword + 1):])
# HANDLE PRONOUNS
for k, v in si_pron_acc_keys_bysize.items():
            if lowerword[-k:] in v: # ends with accusative pronoun
for pk, pv in pl_prep_bysize.items():
if lowerword[:pk] in pv: # starts with a prep
if lowerword.split() == [lowerword[:pk], lowerword[-k:]]: # only whitespace in between
return lowerword[:-k] + get_si_pron('acc', lowerword[-k:], gender)
try:
return get_si_pron('nom', word.lower(), gender)
except KeyError:
pass
try:
return get_si_pron('acc', word.lower(), gender)
except KeyError:
pass
# HANDLE ISOLATED IRREGULAR PLURALS
wordsplit = word.split()
wordlast = wordsplit[-1]
lowerwordlast = wordlast.lower()
if wordlast in list(si_sb_irregular_caps.keys()):
llen = len(wordlast)
return '%s%s' % (word[:-llen],
si_sb_irregular_caps[wordlast])
if lowerwordlast in list(si_sb_irregular.keys()):
llen = len(lowerwordlast)
return '%s%s' % (word[:-llen],
si_sb_irregular[lowerwordlast])
if (' '.join(wordsplit[-2:])).lower() in list(si_sb_irregular_compound.keys()):
llen = len(' '.join(wordsplit[-2:])) # TODO: what if 2 spaces between these words?
return '%s%s' % (word[:-llen],
si_sb_irregular_compound[(' '.join(wordsplit[-2:])).lower()])
if lowerword[-5:] == 'quies':
return word[:-3] + 'y'
if lowerword[-7:] == 'persons':
return word[:-1]
if lowerword[-6:] == 'people':
return word[:-4] + 'rson'
# HANDLE FAMILIES OF IRREGULAR PLURALS
if lowerword[-4:] == 'mans':
for k, v in si_sb_U_man_mans_bysize.items():
if lowerword[-k:] in v:
return word[:-1]
for k, v in si_sb_U_man_mans_caps_bysize.items():
if word[-k:] in v:
return word[:-1]
if lowerword[-3:] == 'men':
return word[:-3] + 'man'
if lowerword[-4:] == 'mice':
return word[:-4] + 'mouse'
if lowerword[-4:] == 'lice':
return word[:-4] + 'louse'
if lowerword[-5:] == 'geese':
return word[:-5] + 'goose'
if lowerword[-5:] == 'teeth':
return word[:-5] + 'tooth'
if lowerword[-4:] == 'feet':
return word[:-4] + 'foot'
if lowerword == 'dice':
return 'die'
# HANDLE UNASSIMILATED IMPORTS
if lowerword[-4:] == 'ceps':
return word
if lowerword[-3:] == 'zoa':
return word[:-1] + 'on'
for lastlet, d, numend, post in (
('s', si_sb_U_ch_chs_bysize, -1, ''),
('s', si_sb_U_ex_ices_bysize, -4, 'ex'),
('s', si_sb_U_ix_ices_bysize, -4, 'ix'),
('a', si_sb_U_um_a_bysize, -1, 'um'),
('i', si_sb_U_us_i_bysize, -1, 'us'),
('a', si_sb_U_on_a_bysize, -1, 'on'),
('e', si_sb_U_a_ae_bysize, -1, ''),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
# HANDLE INCOMPLETELY ASSIMILATED IMPORTS
if (self.classical_dict['ancient']):
if lowerword[-6:] == 'trices':
return word[:-3] + 'x'
if lowerword[-4:] in ('eaux', 'ieux'):
return word[:-1]
if lowerword[-5:] in ('ynges', 'inges', 'anges') and len(word) > 6:
return word[:-3] + 'x'
for lastlet, d, numend, post in (
('a', si_sb_C_en_ina_bysize, -3, 'en'),
('s', si_sb_C_ex_ices_bysize, -4, 'ex'),
('s', si_sb_C_ix_ices_bysize, -4, 'ix'),
('a', si_sb_C_um_a_bysize, -1, 'um'),
('i', si_sb_C_us_i_bysize, -1, 'us'),
('s', pl_sb_C_us_us_bysize, None, ''),
('e', si_sb_C_a_ae_bysize, -1, ''),
('a', si_sb_C_a_ata_bysize, -2, ''),
('s', si_sb_C_is_ides_bysize, -3, 's'),
('i', si_sb_C_o_i_bysize, -1, 'o'),
('a', si_sb_C_on_a_bysize, -1, 'on'),
('m', si_sb_C_im_bysize, -2, ''),
('i', si_sb_C_i_bysize, -1, ''),
):
if lowerword[-1] == lastlet: # this test to add speed
for k, v in d.items():
if lowerword[-k:] in v:
return word[:numend] + post
        # HANDLE PLURALS ENDING IN uses -> use
if (lowerword[-6:] == 'houses' or
word in si_sb_uses_use_case or
lowerword in si_sb_uses_use):
return word[:-1]
        # HANDLE PLURALS ENDING IN ies -> ie
if word in si_sb_ies_ie_case or lowerword in si_sb_ies_ie:
return word[:-1]
        # HANDLE PLURALS ENDING IN oes -> oe
if (lowerword[-5:] == 'shoes' or
word in si_sb_oes_oe_case or
lowerword in si_sb_oes_oe):
return word[:-1]
        # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SIBILANTS
if (word in si_sb_sses_sse_case or
lowerword in si_sb_sses_sse):
return word[:-1]
if lowerword in si_sb_singular_s_complete:
return word[:-2]
for k, v in si_sb_singular_s_bysize.items():
if lowerword[-k:] in v:
return word[:-2]
if lowerword[-4:] == 'eses' and word[0] == word[0].upper():
return word[:-2]
# Wouldn't special words
# ending with 's' always have been caught, regardless of them starting
# with a capital letter (i.e. being names)
# It makes sense below to do this for words ending in 'y' so that
# Sally -> Sallys. But not sure it makes sense here. Where is the case
# of a word ending in s that is caught here and would otherwise have been
# caught below?
#
# removing it as I can't find a case that executes it
# TODO: check this again
#
# if (self.classical_dict['names']):
# mo = search(r"([A-Z].*ses)$", word)
# if mo:
# return "%s" % mo.group(1)
if lowerword in si_sb_z_zes:
return word[:-2]
if lowerword in si_sb_zzes_zz:
return word[:-2]
if lowerword[-4:] == 'zzes':
return word[:-3]
if (word in si_sb_ches_che_case or
lowerword in si_sb_ches_che):
return word[:-1]
if lowerword[-4:] in ('ches', 'shes'):
return word[:-2]
if lowerword in si_sb_xes_xe:
return word[:-1]
if lowerword[-3:] == 'xes':
return word[:-2]
# (r"(.*)(us)es$", "%s%s"), TODO: why is this commented?
# HANDLE ...f -> ...ves
if (word in si_sb_ves_ve_case or
lowerword in si_sb_ves_ve):
return word[:-1]
if lowerword[-3:] == 'ves':
if lowerword[-5:-3] in ('el', 'al', 'ol'):
return word[:-3] + 'f'
if lowerword[-5:-3] == 'ea' and word[-6:-5] != 'd':
return word[:-3] + 'f'
if lowerword[-5:-3] in ('ni', 'li', 'wi'):
return word[:-3] + 'fe'
if lowerword[-5:-3] == 'ar':
return word[:-3] + 'f'
# HANDLE ...y
if lowerword[-2:] == 'ys':
if len(lowerword) > 2 and lowerword[-3] in 'aeiou':
return word[:-1]
if (self.classical_dict['names']):
if lowerword[-2:] == 'ys' and word[0] == word[0].upper():
return word[:-1]
if lowerword[-3:] == 'ies':
return word[:-3] + 'y'
# HANDLE ...o
if lowerword[-2:] == 'os':
if lowerword in si_sb_U_o_os_complete:
return word[:-1]
for k, v in si_sb_U_o_os_bysize.items():
if lowerword[-k:] in v:
return word[:-1]
if lowerword[-3:] in ('aos', 'eos', 'ios', 'oos', 'uos'):
return word[:-1]
if lowerword[-3:] == 'oes':
return word[:-2]
# UNASSIMILATED IMPORTS FINAL RULE
if word in si_sb_es_is:
return word[:-2] + 'is'
# OTHERWISE JUST REMOVE ...s
if lowerword[-1] == 's':
return word[:-1]
# COULD NOT FIND SINGULAR
return False
# ADJECTIVES
def a(self, text, count=1):
'''
Return the appropriate indefinite article followed by text.
The indefinite article is either 'a' or 'an'.
If count is not one, then return count followed by text
instead of 'a' or 'an'.
Whitespace at the start and end is preserved.
'''
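        # Illustrative examples (a sketch; expected outputs):
        # >>> p = engine()
        # >>> p.a('apple')             # expected: 'an apple'
        # >>> p.a('cat')               # expected: 'a cat'
        # >>> p.a('honest error')      # expected: 'an honest error'
        # >>> p.a('cat', 3)            # expected: '3 cat' (count is passed through unchanged)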
mo = search(r"\A(\s*)(?:an?\s+)?(.+?)(\s*)\Z",
text, IGNORECASE)
if mo:
word = mo.group(2)
if not word:
return text
pre = mo.group(1)
post = mo.group(3)
result = self._indef_article(word, count)
return "%s%s%s" % (pre, result, post)
return ''
an = a
def _indef_article(self, word, count):
mycount = self.get_count(count)
if mycount != 1:
return "%s %s" % (count, word)
# HANDLE USER-DEFINED VARIANTS
value = self.ud_match(word, self.A_a_user_defined)
if value is not None:
return "%s %s" % (value, word)
# HANDLE ORDINAL FORMS
for a in (
(r"^(%s)" % A_ordinal_a, "a"),
(r"^(%s)" % A_ordinal_an, "an"),
):
mo = search(a[0], word, IGNORECASE)
if mo:
return "%s %s" % (a[1], word)
# HANDLE SPECIAL CASES
for a in (
(r"^(%s)" % A_explicit_an, "an"),
(r"^[aefhilmnorsx]$", "an"),
(r"^[bcdgjkpqtuvwyz]$", "a"),
):
mo = search(a[0], word, IGNORECASE)
if mo:
return "%s %s" % (a[1], word)
# HANDLE ABBREVIATIONS
for a in (
(r"(%s)" % A_abbrev, "an", VERBOSE),
(r"^[aefhilmnorsx][.-]", "an", IGNORECASE),
(r"^[a-z][.-]", "a", IGNORECASE),
):
mo = search(a[0], word, a[2])
if mo:
return "%s %s" % (a[1], word)
# HANDLE CONSONANTS
mo = search(r"^[^aeiouy]", word, IGNORECASE)
if mo:
return "a %s" % word
# HANDLE SPECIAL VOWEL-FORMS
for a in (
(r"^e[uw]", "a"),
(r"^onc?e\b", "a"),
(r"^onetime\b", "a"),
(r"^uni([^nmd]|mo)", "a"),
(r"^u[bcfghjkqrst][aeiou]", "a"),
(r"^ukr", "a"),
(r"^(%s)" % A_explicit_a, "a"),
):
mo = search(a[0], word, IGNORECASE)
if mo:
return "%s %s" % (a[1], word)
# HANDLE SPECIAL CAPITALS
mo = search(r"^U[NK][AIEO]?", word)
if mo:
return "a %s" % word
# HANDLE VOWELS
mo = search(r"^[aeiou]", word, IGNORECASE)
if mo:
return "an %s" % word
# HANDLE y... (BEFORE CERTAIN CONSONANTS IMPLIES (UNNATURALIZED) "i.." SOUND)
mo = search(r"^(%s)" % A_y_cons, word, IGNORECASE)
if mo:
return "an %s" % word
# OTHERWISE, GUESS "a"
return "a %s" % word
# 2. TRANSLATE ZERO-QUANTIFIED $word TO "no plural($word)"
def no(self, text, count=None):
'''
If count is 0, no, zero or nil, return 'no' followed by the plural
of text.
If count is one of:
1, a, an, one, each, every, this, that
return count followed by text.
        Otherwise return count followed by the plural of text.
In the return value count is always followed by a space.
Whitespace at the start and end is preserved.
'''
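        # Illustrative examples (a sketch; expected outputs):
        # >>> p = engine()
        # >>> p.no('error', 0)         # expected: 'no errors'
        # >>> p.no('error', 1)         # expected: '1 error'
        # >>> p.no('error', 3)         # expected: '3 errors'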
if count is None and self.persistent_count is not None:
count = self.persistent_count
if count is None:
count = 0
mo = search(r"\A(\s*)(.+?)(\s*)\Z", text)
pre = mo.group(1)
word = mo.group(2)
post = mo.group(3)
if str(count).lower() in pl_count_zero:
return "%sno %s%s" % (pre, self.plural(word, 0), post)
else:
return "%s%s %s%s" % (pre, count, self.plural(word, count), post)
# PARTICIPLES
def present_participle(self, word):
'''
Return the present participle for word.
word is the 3rd person singular verb.
'''
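        # Illustrative examples (a sketch; expected outputs):
        # >>> p = engine()
        # >>> p.present_participle('runs')   # expected: 'running'
        # >>> p.present_participle('eats')   # expected: 'eating'
        # >>> p.present_participle('makes')  # expected: 'making'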
plv = self.plural_verb(word, 2)
for pat, repl in (
(r"ie$", r"y"),
(r"ue$", r"u"), # TODO: isn't ue$ -> u encompassed in the following rule?
(r"([auy])e$", r"\g<1>"),
(r"ski$", r"ski"),
(r"[^b]i$", r""),
(r"^(are|were)$", r"be"),
(r"^(had)$", r"hav"),
(r"^(hoe)$", r"\g<1>"),
(r"([^e])e$", r"\g<1>"),
(r"er$", r"er"),
(r"([^aeiou][aeiouy]([bdgmnprst]))$", "\g<1>\g<2>"),
):
(ans, num) = subn(pat, repl, plv)
if num:
return "%sing" % ans
return "%sing" % ans
# NUMERICAL INFLECTIONS
def ordinal(self, num):
'''
Return the ordinal of num.
num can be an integer or text
e.g. ordinal(1) returns '1st'
ordinal('one') returns 'first'
'''
if match(r"\d", str(num)):
try:
num % 2
n = num
except TypeError:
if '.' in str(num):
try:
n = int(num[-1]) # numbers after decimal, so only need last one for ordinal
except ValueError: # ends with '.', so need to use whole string
n = int(num[:-1])
else:
n = int(num)
try:
post = nth[n % 100]
except KeyError:
post = nth[n % 10]
return "%s%s" % (num, post)
else:
mo = search(r"(%s)\Z" % ordinal_suff, num)
try:
post = ordinal[mo.group(1)]
return resub(r"(%s)\Z" % ordinal_suff, post, num)
except AttributeError:
return "%sth" % num
def millfn(self, ind=0):
if ind > len(mill) - 1:
print3("number out of range")
raise NumOutOfRangeError
return mill[ind]
def unitfn(self, units, mindex=0):
return "%s%s" % (unit[units], self.millfn(mindex))
def tenfn(self, tens, units, mindex=0):
if tens != 1:
return "%s%s%s%s" % (ten[tens],
'-' if tens and units else '',
unit[units],
self.millfn(mindex))
return "%s%s" % (teen[units], mill[mindex])
def hundfn(self, hundreds, tens, units, mindex):
if hundreds:
return "%s hundred%s%s%s, " % (unit[hundreds], # use unit not unitfn as simpler
" %s " % self.number_args['andword'] if tens or units else '',
self.tenfn(tens, units),
self.millfn(mindex))
if tens or units:
return "%s%s, " % (self.tenfn(tens, units), self.millfn(mindex))
return ''
def group1sub(self, mo):
units = int(mo.group(1))
if units == 1:
return " %s, " % self.number_args['one']
elif units:
# TODO: bug one and zero are padded with a space but other numbers aren't. check this in perl
return "%s, " % unit[units]
else:
return " %s, " % self.number_args['zero']
def group1bsub(self, mo):
units = int(mo.group(1))
if units:
# TODO: bug one and zero are padded with a space but other numbers aren't. check this in perl
return "%s, " % unit[units]
else:
return " %s, " % self.number_args['zero']
def group2sub(self, mo):
tens = int(mo.group(1))
units = int(mo.group(2))
if tens:
return "%s, " % self.tenfn(tens, units)
if units:
return " %s %s, " % (self.number_args['zero'], unit[units])
return " %s %s, " % (self.number_args['zero'], self.number_args['zero'])
def group3sub(self, mo):
hundreds = int(mo.group(1))
tens = int(mo.group(2))
units = int(mo.group(3))
if hundreds == 1:
hunword = " %s" % self.number_args['one']
elif hundreds:
hunword = "%s" % unit[hundreds]
# TODO: bug one and zero are padded with a space but other numbers aren't. check this in perl
else:
hunword = " %s" % self.number_args['zero']
if tens:
tenword = self.tenfn(tens, units)
elif units:
tenword = " %s %s" % (self.number_args['zero'], unit[units])
else:
tenword = " %s %s" % (self.number_args['zero'], self.number_args['zero'])
return "%s %s, " % (hunword, tenword)
def hundsub(self, mo):
ret = self.hundfn(int(mo.group(1)), int(mo.group(2)), int(mo.group(3)), self.mill_count)
self.mill_count += 1
return ret
def tensub(self, mo):
return "%s, " % self.tenfn(int(mo.group(1)), int(mo.group(2)), self.mill_count)
def unitsub(self, mo):
return "%s, " % self.unitfn(int(mo.group(1)), self.mill_count)
def enword(self, num, group):
# import pdb
# pdb.set_trace()
if group == 1:
num = resub(r"(\d)", self.group1sub, num)
elif group == 2:
num = resub(r"(\d)(\d)", self.group2sub, num)
num = resub(r"(\d)", self.group1bsub, num, 1)
# group1bsub same as
# group1sub except it doesn't use the default word for one.
            # Is this required? i.e. is the default word not to be used when
# grouping in pairs?
#
# No. This is a bug. Fixed. TODO: report upstream.
elif group == 3:
num = resub(r"(\d)(\d)(\d)", self.group3sub, num)
num = resub(r"(\d)(\d)", self.group2sub, num, 1)
num = resub(r"(\d)", self.group1sub, num, 1)
elif int(num) == 0:
num = self.number_args['zero']
elif int(num) == 1:
num = self.number_args['one']
else:
num = num.lstrip().lstrip('0')
self.mill_count = 0
# surely there's a better way to do the next bit
mo = search(r"(\d)(\d)(\d)(?=\D*\Z)", num)
while mo:
num = resub(r"(\d)(\d)(\d)(?=\D*\Z)", self.hundsub, num, 1)
mo = search(r"(\d)(\d)(\d)(?=\D*\Z)", num)
num = resub(r"(\d)(\d)(?=\D*\Z)", self.tensub, num, 1)
num = resub(r"(\d)(?=\D*\Z)", self.unitsub, num, 1)
return num
def blankfn(self, mo):
''' do a global blank replace
TODO: surely this can be done with an option to resub
rather than this fn
'''
return ''
def commafn(self, mo):
''' do a global ',' replace
TODO: surely this can be done with an option to resub
rather than this fn
'''
return ','
def spacefn(self, mo):
''' do a global ' ' replace
TODO: surely this can be done with an option to resub
rather than this fn
'''
return ' '
def number_to_words(self, num, wantlist=False,
group=0, comma=',', andword='and',
zero='zero', one='one', decimal='point',
threshold=None):
'''
Return a number in words.
group = 1, 2 or 3 to group numbers before turning into words
comma: define comma
andword: word for 'and'. Can be set to ''.
e.g. "one hundred and one" vs "one hundred one"
zero: word for '0'
one: word for '1'
decimal: word for decimal point
threshold: numbers above threshold not turned into words
parameters not remembered from last call. Departure from Perl version.
'''
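        # Illustrative examples (a sketch; expected outputs with the default options):
        # >>> p = engine()
        # >>> p.number_to_words(1234)
        # expected: 'one thousand, two hundred and thirty-four'
        # >>> p.number_to_words(1234, andword='')
        # expected: 'one thousand, two hundred thirty-four'
        # >>> p.number_to_words('21st')
        # expected: 'twenty-first'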
self.number_args = dict(andword=andword, zero=zero, one=one)
num = '%s' % num
# Handle "stylistic" conversions (up to a given threshold)...
if (threshold is not None and float(num) > threshold):
spnum = num.split('.', 1)
while (comma):
(spnum[0], n) = subn(r"(\d)(\d{3}(?:,|\Z))", r"\1,\2", spnum[0])
if n == 0:
break
try:
return "%s.%s" % (spnum[0], spnum[1])
except IndexError:
return "%s" % spnum[0]
if group < 0 or group > 3:
raise BadChunkingOptionError
nowhite = num.lstrip()
if nowhite[0] == '+':
sign = "plus"
elif nowhite[0] == '-':
sign = "minus"
else:
sign = ""
myord = (num[-2:] in ('st', 'nd', 'rd', 'th'))
if myord:
num = num[:-2]
finalpoint = False
if decimal:
if group != 0:
chunks = num.split('.')
else:
chunks = num.split('.', 1)
if chunks[-1] == '': # remove blank string if nothing after decimal
chunks = chunks[:-1]
finalpoint = True # add 'point' to end of output
else:
chunks = [num]
first = 1
loopstart = 0
if chunks[0] == '':
first = 0
if len(chunks) > 1:
loopstart = 1
for i in range(loopstart, len(chunks)):
chunk = chunks[i]
# remove all non numeric \D
chunk = resub(r"\D", self.blankfn, chunk)
if chunk == "":
chunk = "0"
if group == 0 and (first == 0 or first == ''):
chunk = self.enword(chunk, 1)
else:
chunk = self.enword(chunk, group)
if chunk[-2:] == ', ':
chunk = chunk[:-2]
chunk = resub(r"\s+,", self.commafn, chunk)
if group == 0 and first:
chunk = resub(r", (\S+)\s+\Z", " %s \\1" % andword, chunk)
chunk = resub(r"\s+", self.spacefn, chunk)
# chunk = resub(r"(\A\s|\s\Z)", self.blankfn, chunk)
chunk = chunk.strip()
if first:
first = ''
chunks[i] = chunk
numchunks = []
if first != 0:
numchunks = chunks[0].split("%s " % comma)
if myord and numchunks:
# TODO: can this be just one re as it is in perl?
mo = search(r"(%s)\Z" % ordinal_suff, numchunks[-1])
if mo:
numchunks[-1] = resub(r"(%s)\Z" % ordinal_suff, ordinal[mo.group(1)],
numchunks[-1])
else:
numchunks[-1] += 'th'
for chunk in chunks[1:]:
numchunks.append(decimal)
numchunks.extend(chunk.split("%s " % comma))
if finalpoint:
numchunks.append(decimal)
        # wantlist: Perl list context. can explicitly specify in Python
if wantlist:
if sign:
numchunks = [sign] + numchunks
return numchunks
elif group:
signout = "%s " % sign if sign else ''
return "%s%s" % (signout, ", ".join(numchunks))
else:
signout = "%s " % sign if sign else ''
num = "%s%s" % (signout, numchunks.pop(0))
if decimal is None:
first = True
else:
first = not num.endswith(decimal)
for nc in numchunks:
if nc == decimal:
num += " %s" % nc
first = 0
elif first:
num += "%s %s" % (comma, nc)
else:
num += " %s" % nc
return num
# Join words with commas and a trailing 'and' (when appropriate)...
def join(self, words, sep=None, sep_spaced=True,
final_sep=None, conj='and', conj_spaced=True):
'''
        Join a list of words into a single string.
e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly'
options:
conj: replacement for 'and'
sep: separator. default ',', unless ',' is in the list then ';'
final_sep: final separator. default ',', unless ',' is in the list then ';'
conj_spaced: boolean. Should conj have spaces around it
'''
if not words:
return ""
if len(words) == 1:
return words[0]
if conj_spaced:
if conj == '':
conj = ' '
else:
conj = ' %s ' % conj
if len(words) == 2:
return "%s%s%s" % (words[0], conj, words[1])
if sep is None:
if ',' in ''.join(words):
sep = ';'
else:
sep = ','
if final_sep is None:
final_sep = sep
final_sep = "%s%s" % (final_sep, conj)
if sep_spaced:
sep += ' '
return "%s%s%s" % (sep.join(words[0:-1]), final_sep, words[-1])
| 94,476 | 30.273419 | 106 | py |
Reflect | Reflect-master/util/train_params.py | radam_slw = {
'learning_rate': 0.0001,
'optimizer': 'radam',
'hold_base_rate_steps': 0
}
adam_slw = {
'learning_rate': 0.0001,
'optimizer': 'adam',
'hold_base_rate_steps': 0
}
adam_mid = {
'learning_rate': 0.0005,
'optimizer': 'adam',
'hold_base_rate_steps': 0
}
adam_midmid = {
'learning_rate': 0.0002,
'optimizer': 'adam',
'hold_base_rate_steps': 0
}
radam_fst_long = {
'learning_rate': 0.001,
'optimizer': 'radam',
'hold_base_rate_steps': 1000000
}
radam_slw2 = {
'learning_rate': 0.0005,
'optimizer': 'radam',
'hold_base_rate_steps': 10000
}
radam_slw_long = {
'learning_rate': 0.0001,
'optimizer': 'radam',
'hold_base_rate_steps': 1000000
}
radam_fst = {
'learning_rate': 0.001,
'optimizer': 'radam',
'hold_base_rate_steps': 10000
}
radam_mid = {
'learning_rate': 0.0005,
'optimizer': 'radam',
'hold_base_rate_steps': 10000
}
crs_fst = {
'learning_rate': 0.001,
'optimizer': 'adam',
'decay_steps': 1000,
'schedule': 'crs_fst'
}
crs_fst_v2 = {
'learning_rate': 0.0001,
'optimizer': 'adam',
'decay_steps': 1000,
'schedule': 'crs_fst'
}
crs_slw = {
'learning_rate': 0.001,
'optimizer': 'adam',
'decay_steps': 10000,
'schedule': 'crs_slw'
}
crs_slw_v2 = {
'learning_rate': 0.0001,
'optimizer': 'adam',
'decay_steps': 10000,
'schedule': 'crs_slw'
}
crs_slw_v3 = {
'learning_rate': 0.0005,
'optimizer': 'adam',
'decay_steps': 10000,
'schedule': 'crs_slw'
}
mnist_adam = {'optimizer': 'adam',
'learning_rate': 0.001,
'decay_steps': 10000,
'num_train_epochs': 20
}
svhn_adam_mid = {
'learning_rate': 0.0005,
'optimizer': 'adam',
'hold_base_rate_steps': 1000,
'num_train_epochs': 100,
}
svhn_radam_mid = {
'learning_rate': 0.0005,
'optimizer': 'radam',
'hold_base_rate_steps': 1000,
'num_train_epochs': 200
}
svhn_crs_slw = {
'learning_rate': 0.0005,
'optimizer': 'adam',
'hold_base_rate_steps': 0,
  'decay_steps': 10000,
  'schedule': 'crs_slw',
  'num_train_epochs': 200
}
TRAIN_PARAMS = {'radam_slw': radam_slw,
'radam_fst': radam_fst,
'adam_slw': adam_slw,
'radam_fst_long': radam_fst_long,
'radam_slw_long': radam_slw_long,
'adam_mid': adam_mid,
'adam_midmid': adam_midmid,
'radam_mid': radam_mid,
'crs_fst': crs_fst,
'crs_slw': crs_slw,
'crs_slw_v2': crs_slw_v2,
'crs_slw_v3': crs_slw_v3,
'crs_fst_v2': crs_fst_v2,
'mnist_adam': mnist_adam,
'radam_slw2': radam_slw2,
'svhn_adam_mid': svhn_adam_mid,
'svhn_radam_mid': svhn_radam_mid,
'svhn_crs_slw': svhn_crs_slw} | 2,734 | 18.535714 | 49 | py |
Reflect | Reflect-master/util/config_util.py | from util.distill_params import DISTILL_PARAMS
from util.model_configs import GPT2Config, ModelConfig, MODEL_CONFIGS, CapsConfig, ResnetConfig
from util.train_params import TRAIN_PARAMS
class TrainParams(object):
def __init__(self, optimizer,
learning_rate=0.0001,
n_epochs=60,
warmup_steps=5000,
decay_steps=10000,
hold_base_rate_steps=1000,
total_training_steps=60000,
num_train_epochs=60,
decay_rate=0.96,
schedule='',
):
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
self.hold_base_rate_steps = hold_base_rate_steps
self.total_training_steps = total_training_steps
self.num_train_epochs = num_train_epochs
self.optimizer = optimizer
self.schedule = schedule
self.decay_rate = decay_rate
class DistillParams(object):
def __init__(self,
distill_temp=5.0,
student_distill_rate=0.9,
student_gold_rate=0.1,
student_learning_rate=0.0001,
student_decay_steps=10000,
student_warmup_steps=10000,
student_hold_base_rate_steps=1000,
student_decay_rate=0.96,
student_optimizer='adam',
teacher_learning_rate=0.0001,
teacher_decay_steps=10000,
teacher_warmup_steps=10000,
teacher_hold_base_rate_steps=1000,
teacher_decay_rate=0.96,
teacher_optimizer='radam',
n_epochs=60,
schedule='',
distill_decay_steps=1000000,
distill_warmup_steps=0,
hold_base_distillrate_steps=1000000,
student_distill_rep_rate=1.0,
distill_min_rate=0.0,
distill_schedule='cnst',
):
self.distill_temp = distill_temp
self.distill_schedule = distill_schedule
self.student_distill_rate = student_distill_rate
self.distill_min_rate = distill_min_rate
self.student_gold_rate = student_gold_rate
self.student_learning_rate = student_learning_rate
self.student_decay_steps = student_decay_steps
self.student_warmup_steps = student_warmup_steps
self.student_hold_base_rate_steps = student_hold_base_rate_steps
self.student_optimizer = student_optimizer
self.teacher_learning_rate = teacher_learning_rate
self.teacher_warmup_steps = teacher_warmup_steps
self.teacher_decay_steps = teacher_decay_steps
self.teacher_optimizer = teacher_optimizer
self.teacher_hold_base_rate_steps = teacher_hold_base_rate_steps
self.n_epochs = n_epochs
self.schedule = schedule
self.distill_decay_steps = distill_decay_steps
self.distill_warmup_steps = distill_warmup_steps
self.hold_base_distillrate_steps = hold_base_distillrate_steps
self.student_distill_rep_rate = student_distill_rep_rate
self.teacher_decay_rate = teacher_decay_rate
self.student_decay_rate = student_decay_rate
class TaskParams:
def __init__(self, batch_size=64, num_replicas_in_sync=1):
self.batch_size = batch_size
self.num_replicas_in_sync = num_replicas_in_sync
def get_train_params(train_config):
train_params = TrainParams(**TRAIN_PARAMS[train_config])
return train_params
def get_distill_params(distill_config):
if distill_config != 'base':
return DistillParams(**DISTILL_PARAMS[distill_config])
return DistillParams()
def get_task_params(**kwargs):
task_params = TaskParams(**kwargs)
return task_params
def get_model_params(task, config_name='', model_config='base'):
print("model config:", model_config)
if model_config in MODEL_CONFIGS:
model_cnfgs = MODEL_CONFIGS.get(model_config)
else:
model_cnfgs = MODEL_CONFIGS.get('base')
if 'gpt' in config_name or 'bert' in config_name:
return GPT2Config(vocab_size=task.vocab_size(),
output_dim=task.output_size(),
num_labels=task.output_size(),
**model_cnfgs)
elif 'caps' in config_name:
return CapsConfig(output_dim=task.output_size(),
**model_cnfgs)
elif 'resnet' in config_name:
return ResnetConfig(output_dim=task.output_size(),
**model_cnfgs)
else:
return ModelConfig(input_dim=task.vocab_size(),
output_dim=task.output_size(),**model_cnfgs)
| 4,520 | 34.880952 | 95 | py |
Reflect | Reflect-master/util/models.py | from tf2_models.capnet import Capsule
from tf2_models.cnn import VanillaCNN
from tf2_models.ff import VanillaFF
from tf2_models.ff_resnet import FFResnet
from tf2_models.lm_lstm import LmLSTM, LmLSTMSharedEmb, ClassifierLSTM, LmLSTMSharedEmbV2
from tf2_models.lm_transformer import LmGPT2, LmGPT2SharedWeights, ClassifierGPT2, ClassifierGPT2SharedWeights, \
ClassifierBERT, ClassifierBERTSharedWeights
from tf2_models.matrix_caps import MatrixCaps
from tf2_models.resnet import Resnet
MODELS = {"lm_lstm": LmLSTM,
"lm_gpt2": LmGPT2,
"lm_gpt2_shared": LmGPT2SharedWeights,
"lm_lstm_shared_emb": LmLSTMSharedEmbV2,
'cl_gpt2': ClassifierGPT2,
'cl_lstm': ClassifierLSTM,
'cl_gpt2_shared': ClassifierGPT2SharedWeights,
'cl_bert': ClassifierBERT,
'cl_bert_shared': ClassifierBERTSharedWeights,
'cl_vcnn': VanillaCNN,
'cl_vff': VanillaFF,
'cl_capsule': Capsule,
'matrix_capsule': MatrixCaps,
'resnet': Resnet,
'resnet_ff': FFResnet} | 1,068 | 41.76 | 113 | py |
Reflect | Reflect-master/util/__init__.py | 0 | 0 | 0 | py |
|
Reflect | Reflect-master/util/tasks.py | from tasks.lm1b import Lm1B
from tasks.mnist import Mnist, AffNistTask, Svhn, Mnist40
from tasks.smallnorb import SmallNorb
from tasks.sst import ClassifySST2, LmSST2
from tasks.sv_agreement import SvAgreementLM, WordSvAgreementLM, WordSvAgreementVP
from tasks.wiki import WikiLM
TASKS = {
'sv_agreement_lm': SvAgreementLM,
'word_sv_agreement_lm': WordSvAgreementLM,
'word_sv_agreement_vp': WordSvAgreementVP,
'mnist': Mnist,
'affnist': AffNistTask,
'smallnorb': SmallNorb,
'sst2': ClassifySST2,
'lm_sst2': LmSST2,
'lm1b': Lm1B,
'wikilm': WikiLM,
'svhn': Svhn,
'mnist40': Mnist40
} | 606 | 27.904762 | 82 | py |
Reflect | Reflect-master/distill/offline_repshare.py | import tensorflow as tf
import os
from distill.distiller import Distiller
from distill.online_distiller import OnlineDistiller
from distill.repsim_util import get_reps
from tf2_models.train_utils import ExponentialDecayWithWarmpUp
from tf2_models.trainer import OPTIMIZER_DIC
from tf2_models.utils import camel2snake
from inspect import isfunction
import numpy as np
class OfflineRepDistiller(Distiller):
"""
Implementation of soft representation sharing in online mode
"""
def __init__(self, hparams, distill_params, teacher_model, student_model, teacher_task, student_task,
teacher_log_dir, student_log_dir, teacher_ckpt_dir, student_ckpt_dir):
self.teacher_model = teacher_model
self.student_model = student_model
self.student_task = student_task
self.teacher_task = teacher_task
self.hparams = hparams
self.distill_params = distill_params
self.temperature = tf.convert_to_tensor(distill_params.distill_temp)
self.rep_loss = self.student_task.get_rep_loss()
self.student_task_loss = self.student_task.get_loss_fn()
self.teacher_task_loss = self.teacher_task.get_loss_fn()
self.student_metrics = self.student_task.metrics()
self.teacher_metrics = self.teacher_task.metrics()
self.teacher_task_probs_fn = self.teacher_task.get_probs_fn()
self.create_student_optimizer()
self.setup_ckp_and_summary(student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir)
self.setup_models(distill_params)
def setup_models(self, distill_params):
x_s, y_s = iter(self.student_task.valid_dataset).next()
x_t, y_t = iter(self.teacher_task.valid_dataset).next()
self.student_model(x_s)
self.student_model.summary()
self.teacher_model(x_t)
self.teacher_model.summary()
self.student_model.compile(
optimizer=self.student_optimizer,
loss=self.student_task_loss,
metrics=[self.student_metrics])
self.teacher_model.compile(
loss=self.teacher_task_loss,
metrics=[self.teacher_metrics])
def distill_loop(self):
@tf.function(experimental_relax_shapes=True)
def student_train_step(x, y_s, teacher_logits, teacher_reps):
      ''' Training step for the student model (this is the only training step for offline distillation).
      :param x: input
      :param y_s: gold labels, used to compute the actual task loss
      :param teacher_logits: logits produced by the teacher model
      :param teacher_reps: representations from the teacher model, used to compute the representation loss
      :return:
        rep_loss
        actual_loss
      '''
#teacher_probs = self.task_probs_fn(logits=teacher_logits, labels=y_t, temperature=self.temperature)
with tf.GradientTape() as tape:
#logits = self.student_model(x, training=True)
logits, student_reps = get_reps(x, self.student_model,
index=(0, self.student_model.rep_index),
layer= (None, self.student_model.rep_layer), training=True)
rep_loss = self.rep_loss(reps1=student_reps, reps2=teacher_reps, padding_symbol=self.student_task.output_padding_symbol)
reg_loss = tf.math.add_n(self.student_model.losses)
actual_loss = self.student_task_loss(y_pred=logits, y_true=y_s)
final_loss = self.distill_params.student_distill_rep_rate * rep_loss + \
self.distill_params.student_gold_rate * actual_loss + reg_loss
grads = tape.gradient(final_loss, self.student_model.trainable_weights)
self.student_model.optimizer.apply_gradients(zip(grads, self.student_model.trainable_weights),
name="student_optimizer")
return rep_loss, actual_loss
@tf.function
def epoch_loop():
step = 0
student_train_examples = self.student_task.train_dataset
for x_s, y_s in student_train_examples:
teacher_logits, teacher_reps = get_reps(x_s, self.teacher_model,
index=(0, self.teacher_model.rep_index),
layer=(None, self.teacher_model.rep_layer), training=False)
reg_loss = tf.math.add_n(self.teacher_model.losses)
actual_loss = self.teacher_task_loss(y_pred=teacher_logits, y_true=y_s)
teacher_loss = actual_loss + reg_loss
distill_loss, actual_loss = student_train_step(x=x_s, y_s=y_s,
teacher_logits=teacher_logits, teacher_reps=teacher_reps)
# Log every 200 batches.
if step % 200 == 0:
with tf.summary.experimental.summary_scope("student_train"):
tf.summary.scalar('student_learning_rate',
self.student_model.optimizer.learning_rate(self.student_model.optimizer.iterations))
tf.summary.scalar('fine_distill_loss', distill_loss, )
with tf.summary.experimental.summary_scope("teacher_train"):
tf.summary.scalar('teacher_loss', teacher_loss)
step += 1
if step == self.student_task.n_train_batches:
with tf.summary.experimental.summary_scope("student_train"):
tf.summary.scalar('distill_loss', distill_loss)
tf.summary.scalar('actual_loss', actual_loss)
break
with self.summary_writer.as_default():
num_epochs = self.distill_params.n_epochs
for _ in tf.range(num_epochs):
epoch_loop()
teacher_eval_results = self.teacher_model.evaluate(self.teacher_task.valid_dataset,
steps=self.teacher_task.n_valid_batches)
# Evaluate Teacher
with tf.summary.experimental.summary_scope("eval_teacher"):
for i, m_name in enumerate(self.teacher_model.metrics_names):
tf.summary.scalar(m_name, teacher_eval_results[i])
# Evaluate Student
student_eval_results = self.student_model.evaluate(self.student_task.valid_dataset,
steps=self.student_task.n_valid_batches)
with tf.summary.experimental.summary_scope("eval_student"):
for i, m_name in enumerate(self.student_model.metrics_names):
tf.summary.scalar(m_name, student_eval_results[i])
self.save_student()
| 6,270 | 42.248276 | 128 | py |
Reflect | Reflect-master/distill/repsim_util.py | import tensorflow as tf
import numpy as np
def get_reps(outputs, index=1, layer=-1, **kwargs):
  """Select the logits and an intermediate representation from a model's outputs.

  `outputs` is the tuple returned by a model's detailed call; element 0 holds the
  logits and `index` selects the representation to extract. For the LSTM models,
  index 1 is the final RNN output and index 2 holds the hidden activations of all
  layers (including the input embeddings). If the selected representation is
  stacked per layer, `layer` picks a single layer; -1 keeps the full stack.
  """
  logits = outputs[0]
  outputs = tf.tuple(outputs)
  rep = outputs[index]
  if layer != -1:
    rep = tf.gather(rep, layer)
  return logits, rep
@tf.function
def normalized_pairwisedot_product_sim(reps1, reps2):
reps1 = reps1 / tf.norm(reps1, axis=-1)[..., None]
reps2 = reps2 / tf.norm(reps2, axis=-1)[..., None]
pw_dot_product = tf.cast(tf.matmul(reps1, reps2, transpose_b=True), dtype=tf.float32)
return pw_dot_product
@tf.function
def normalized_dot_product_sim(reps1, reps2, padding_mask):
# normalize reps:
reps1 = reps1 / tf.norm(reps1, axis=-1)[..., None]
reps2 = reps2 / tf.norm(reps2, axis=-1)[..., None]
# Elementwise multiplication
dot_product = tf.multiply(reps1, reps2)
# Sum over last axis to get the dot product similarity between corresponding pairs
dot_product = tf.reduce_sum(dot_product, axis=-1)
dot_product = tf.multiply(dot_product, padding_mask[:, 0])
return dot_product
@tf.function
def second_order_rep_sim(reps1, reps2, padding_mask):
sims1 = normalized_pairwisedot_product_sim(reps1, reps1)
sims2 = normalized_pairwisedot_product_sim(reps2, reps2)
#padding_mask = tf.ones((tf.shape(reps1)[0], 1))
so_sims = normalized_dot_product_sim(sims1, sims2, padding_mask) * padding_mask[:, 0]
mean_sim = tf.reduce_sum(so_sims) / tf.reduce_sum(padding_mask)
return mean_sim, so_sims
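
# Minimal illustrative sketch (added for clarity, not used elsewhere): comparing two
# small random representation matrices with the second-order similarity above. The
# shapes are arbitrary toy values; a padding mask of ones means no row is masked out.
def example_second_order_similarity(num_examples=4, dim=8):
  reps1 = tf.random.normal((num_examples, dim))
  reps2 = tf.random.normal((num_examples, dim))
  padding_mask = tf.ones((num_examples, 1))
  mean_sim, per_example_sims = second_order_rep_sim(reps1, reps2, padding_mask)
  return mean_sim, per_example_sims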
@tf.function
def compare_models(inputs, model1, model2, index1=1, index2=1, layer1=None, layer2=None, padding_symbol=None):
reps1 = get_reps(inputs, model1, index=index1, layer=layer1)
reps2 = get_reps(inputs, model2, index=index2, layer=layer2)
reps1 = tf.reshape(reps1, (-1, tf.shape(reps1)[-1]))
reps2 = tf.reshape(reps2, (-1, tf.shape(reps2)[-1]))
if padding_symbol is not None and padding_symbol > -1:
padding_mask = tf.cast(1.0 - (inputs == padding_symbol), dtype=tf.float32)
padding_mask = tf.reshape(padding_mask, (-1, 1))
else:
padding_mask = tf.ones((tf.shape(reps1)[0]))
similarity_measures = second_order_rep_sim(reps1, reps2, padding_mask=padding_mask)
return similarity_measures
@tf.function
def compare_reps(reps1, reps2, padding_symbol=None, inputs=None):
reps1 = tf.reshape(reps1, (-1, tf.shape(reps1)[-1]))
reps2 = tf.reshape(reps2, (-1, tf.shape(reps2)[-1]))
if padding_symbol is not None and padding_symbol > -1:
padding_mask = tf.cast(1.0 - (inputs == padding_symbol), dtype=tf.float32)
padding_mask = tf.reshape(padding_mask, (-1, 1))
else:
padding_mask = tf.ones((tf.shape(reps1)[0], 1))
similarity_measures = second_order_rep_sim(reps1, reps2, padding_mask)
return similarity_measures
@tf.function(experimental_relax_shapes=True)
def rep_loss(reps1, reps2, padding_symbol=None, inputs=None):
reps1 = tf.reshape(reps1, (-1, tf.shape(reps1)[-1]))
reps2 = tf.reshape(reps2, (-1, tf.shape(reps2)[-1]))
if padding_symbol is not None and padding_symbol > -1:
padding_mask = 1.0 - tf.cast(inputs == padding_symbol, dtype=tf.float32)
padding_mask = tf.reshape(padding_mask, (-1, 1))
else:
padding_mask = tf.ones((tf.shape(reps1)[0], 1))
mean_sim, _ = second_order_rep_sim(reps1, reps2, padding_mask)
return 1.0 - mean_sim | 3,444 | 31.5 | 110 | py |
Reflect | Reflect-master/distill/online_distiller.py | import tensorflow as tf
import os
from distill.distill_util import get_distill_scheduler
from distill.distiller import Distiller
from tf2_models.train_utils import ExponentialDecayWithWarmpUp
from tf2_models.trainer import OPTIMIZER_DIC
from tf2_models.utils import camel2snake
from inspect import isfunction
import numpy as np
class OnlineDistiller(Distiller):
def __init__(self, hparams, distill_params, teacher_model, student_model, task,
teacher_log_dir, student_log_dir, teacher_ckpt_dir, student_ckpt_dir):
self.hparams = hparams
self.teacher_model = teacher_model
self.student_model = student_model
self.task = task
self.distill_params = distill_params
self.temperature = tf.convert_to_tensor(distill_params.distill_temp)
self.distill_loss = self.task.get_distill_loss_fn(self.distill_params)
self.task_loss = self.task.get_loss_fn()
self.student_metrics = self.task.metrics()
self.teacher_metrics = self.task.metrics()
self.task_probs_fn = self.task.get_probs_fn()
self.create_student_optimizer()
self.create_teacher_optimizer()
self.setup_ckp_and_summary(student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir)
self.setup_models(distill_params, task)
self.distillrate_scheduler = get_distill_scheduler(distill_params.distill_schedule,
min=distill_params.distill_min_rate,
max=distill_params.student_distill_rate)
def setup_ckp_and_summary(self, student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir):
# Init checkpoints
self.teacher_ckpt = tf.train.Checkpoint(step=tf.Variable(1),
optimizer=self.teacher_optimizer,
net=self.teacher_model)
self.teacher_manager = tf.train.CheckpointManager(self.teacher_ckpt, teacher_ckpt_dir, max_to_keep=self.hparams.max_checkpoints)
self.student_ckpt = tf.train.Checkpoint(step=tf.Variable(1),
optimizer=self.student_optimizer,
net=self.student_model)
self.student_manager = tf.train.CheckpointManager(self.student_ckpt, student_ckpt_dir, max_to_keep=self.hparams.max_checkpoints)
# Init summary
student_summary_dir = os.path.join(student_log_dir, 'summaries')
tf.io.gfile.makedirs(student_log_dir)
self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(student_summary_dir, 'train'))
tf.compat.v2.summary.experimental.set_step(self.teacher_optimizer.iterations)
def create_teacher_optimizer(self):
teacher_initial_learning_rate = self.distill_params.teacher_learning_rate
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=teacher_initial_learning_rate,
decay_steps=self.distill_params.teacher_decay_steps,
decay_rate=self.distill_params.teacher_decay_rate,
warmup_steps=self.distill_params.teacher_warmup_steps,
hold_base_rate_steps=self.distill_params.teacher_hold_base_rate_steps)
self.teacher_optimizer = OPTIMIZER_DIC[self.distill_params.teacher_optimizer](
learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0)
def setup_models(self, distill_params, task):
x, y = iter(self.task.valid_dataset).next()
self.student_model(x, padding_symbol=self.task.input_padding_symbol)
self.student_model.summary()
self.teacher_model(x, padding_symbol=self.task.input_padding_symbol)
self.teacher_model.summary()
self.student_model.compile(
optimizer=self.student_optimizer,
loss=self.task_loss,
metrics=[self.student_metrics])
self.teacher_model.compile(
optimizer=self.teacher_optimizer,
loss=self.task_loss,
metrics=[self.teacher_metrics])
def distill_loop(self):
@tf.function(experimental_relax_shapes=True)
def teacher_train_step(x, y_true):
with tf.GradientTape() as tape:
logits = self.teacher_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
loss = self.teacher_model.loss(y_pred=logits, y_true=y_true)
if len(self.teacher_model.losses) > 0:
reg_loss = tf.math.add_n(self.teacher_model.losses)
else:
reg_loss = 0
final_loss = loss + reg_loss
grads = tape.gradient(final_loss, self.teacher_model.trainable_weights)
self.teacher_model.optimizer.apply_gradients(zip(grads, self.teacher_model.trainable_weights),
name="teacher_optimizer")
return logits, final_loss
@tf.function(experimental_relax_shapes=True)
def student_train_step(x, y, y_true):
      ''' Training step for the student model.
      :param x: input
      :param y: soft targets from the teacher model, used to compute the distill loss
      :param y_true: actual outputs, used to compute the actual task loss
      :return:
        distill_loss
        actual_loss
        student_distill_rate
      '''
student_distill_rate = self.distillrate_scheduler(self.student_optimizer.iterations)
student_gold_rate = 1 - student_distill_rate
with tf.GradientTape() as tape:
logits = self.student_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
distill_loss = self.distill_loss(y_pred=logits, y_true=y)
if len(self.student_model.losses) > 0:
reg_loss = tf.math.add_n(self.student_model.losses)
else:
reg_loss = 0
actual_loss = self.task_loss(y_pred=logits, y_true=y_true)
final_loss = student_distill_rate * distill_loss + \
student_gold_rate * actual_loss + reg_loss
grads = tape.gradient(final_loss, self.student_model.trainable_weights)
self.student_model.optimizer.apply_gradients(zip(grads, self.student_model.trainable_weights),
name="student_optimizer")
return distill_loss, actual_loss, student_distill_rate
@tf.function
def epoch_loop():
step = 0
for x, y in self.task.train_dataset:
teacher_logits, teacher_loss = teacher_train_step(x, y)
teacher_probs = self.task_probs_fn(logits=teacher_logits, labels=y, temperature=self.temperature)
soft_targets = tf.stop_gradient(teacher_probs)
distill_loss, actual_loss, student_distill_rate = student_train_step(x=x, y=soft_targets, y_true=y)
# Log every 200 batches.
if step % 200 == 0:
with tf.summary.experimental.summary_scope("student_train"):
tf.summary.scalar('student_learning_rate',
self.student_model.optimizer.learning_rate(self.student_model.optimizer.iterations))
tf.summary.scalar('fine_distill_loss', distill_loss)
tf.summary.scalar('student_distill_rate', student_distill_rate)
with tf.summary.experimental.summary_scope("teacher_train"):
tf.summary.scalar('teacher_loss', teacher_loss)
tf.summary.scalar('teacher_learning_rate',
self.teacher_model.optimizer.learning_rate(self.teacher_model.optimizer.iterations))
step += 1
if step == self.task.n_train_batches:
with tf.summary.experimental.summary_scope("student_train"):
tf.summary.scalar('distill_loss', distill_loss)
tf.summary.scalar('actual_loss', actual_loss)
break
with self.summary_writer.as_default():
num_epochs = self.distill_params.n_epochs
for epoch in tf.range(num_epochs):
epoch_loop()
teacher_eval_results = self.teacher_model.evaluate(self.task.valid_dataset,
steps=self.task.n_valid_batches)
# Evaluate Teacher
with tf.summary.experimental.summary_scope("eval_teacher"):
for i, m_name in enumerate(self.teacher_model.metrics_names):
tf.summary.scalar(m_name, teacher_eval_results[i])
# Evaluate Student
student_eval_results = self.student_model.evaluate(self.task.valid_dataset,
steps=self.task.n_valid_batches)
with tf.summary.experimental.summary_scope("eval_student"):
for i, m_name in enumerate(self.student_model.metrics_names):
tf.summary.scalar(m_name, student_eval_results[i])
pow2 = [0,1,2,4,8,16,32,64,128,256,512]
if self.hparams.keep_some_checkpoints:
if (epoch in pow2) or (epoch == (self.distill_params.n_epochs - 1)):
self.save_student()
self.save_teacher()
else:
self.save_student()
self.save_teacher()
def save_teacher(self):
self.teacher_ckpt.step.assign_add(1)
save_path = self.teacher_manager.save()
tf.print("Saved teacher checkpoint", save_path)
| 9,001 | 43.127451 | 132 | py |
Reflect | Reflect-master/distill/model.py | class Model(object):
def apply(self, examples):
raise NotImplementedError
def update(self, loss):
raise NotImplementedError | 136 | 21.833333 | 29 | py |
Reflect | Reflect-master/distill/distill_main.py | ''' Code to apply the distillation process for a teacher and a student model.
Run:
python distill/distill_main.py \
--task=word_sv_agreement_vp \
--teacher_exp_name=small_lstm_v4_0.0001_withl2 \
--teacher_model=cl_lstm \
--teacher_config=small_lstm_v4 \
--student_exp_name=distilled0 \
--student_model=cl_gpt2 \
--student_config=small_gpt_v9 \
--distill_mode=offline
'''
from distill.distiller import Distiller
from distill.online_distiller import OnlineDistiller
from util import constants
from util.config_util import get_distill_params
import os
from util.config_util import get_model_params, get_task_params, get_train_params
from absl import flags, logging
import sys
import tensorflow as tf
from util.models import MODELS
from util.tasks import TASKS
FLAGS = flags.FLAGS
flags.DEFINE_string('logdir', 'logs', 'log dir')
flags.DEFINE_string('chkpt_dir', 'tf_ckpts', 'checkpoint dir')
flags.DEFINE_string('task', 'word_sv_agreement_lm', 'sv_agreement_lm | word_sv_agreement_lm | word_sv_agreement_vp')
flags.DEFINE_string('distill_config', 'base', ' distillation hparams set')
flags.DEFINE_string('teacher_exp_name', 'trial4', 'experiment directory')
flags.DEFINE_string('teacher_model', 'lm_lstm', 'lm_lstm | lm_gpt2')
flags.DEFINE_string('student_exp_name', 'trial1', 'experiment directory')
flags.DEFINE_string('student_model', 'lm_lstm', 'lm_lstm | lm_gpt2')
flags.DEFINE_string('student_config', 'base', 'base | small_lstm ')
flags.DEFINE_string('teacher_config', 'base', 'base | small_lstm ')
flags.DEFINE_string('distill_mode', 'offline', 'offline | online | off_schdld | on_schdld')
flags.DEFINE_integer('max_checkpoints', 2, 'max_to_keep passed to the checkpoint manager')
flags.DEFINE_boolean('keep_some_checkpoints', False, 'if True, only keep checkpoints of power-of-two epochs and the final epoch')
flags.DEFINE_string('keep_checkpoint_every_n_hours',None, 'keep_checkpoint_every_n_hours passed to training manager')
flags.DEFINE_integer('batch_size', 64, 'batch_size')
FLAGS(sys.argv)
hparams = flags.FLAGS
def create_and_load_models():
if hasattr(task.databuilder, 'sentence_encoder'):
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
else:
cl_token = 0
teacher_model = MODELS[hparams.teacher_model](
hparams=get_model_params(task, hparams.teacher_model, hparams.teacher_config), cl_token=cl_token)
student_model = MODELS[hparams.student_model](
hparams=get_model_params(task, hparams.student_model, hparams.student_config), cl_token=cl_token)
teacher_log_dir = os.path.join(hparams.logdir, task.name,
'_'.join([hparams.distill_mode,hparams.distill_config,
"teacher",teacher_model.model_name,hparams.teacher_config,hparams.teacher_exp_name]))
teacher_ckpt_dir = os.path.join(hparams.chkpt_dir, task.name,
'_'.join([teacher_model.model_name, hparams.teacher_config,hparams.teacher_exp_name]))
student_log_dir = os.path.join(hparams.logdir, task.name,
'_'.join([hparams.distill_mode,hparams.distill_config,
"teacher", teacher_model.model_name, str(hparams.teacher_config), hparams.teacher_exp_name,
"student", student_model.model_name, str(hparams.student_config), hparams.student_exp_name]))
student_ckpt_dir = os.path.join(hparams.chkpt_dir, task.name,
'_'.join([hparams.distill_mode,hparams.distill_config,
"teacher", teacher_model.model_name, str(hparams.teacher_config), hparams.teacher_exp_name,
"student",student_model.model_name, str(hparams.student_config),hparams.student_exp_name]))
return teacher_model, student_model, teacher_log_dir, teacher_ckpt_dir, student_log_dir, student_ckpt_dir
DISTILLER = {'offline': Distiller,
'online': OnlineDistiller,
}
if __name__ == '__main__':
# Create task
task = TASKS[hparams.task](get_task_params(batch_size=hparams.batch_size))
# Create the Model
teacher_model, student_model, \
teacher_log_dir, teacher_ckpt_dir, student_log_dir, student_ckpt_dir = create_and_load_models()
distiller = DISTILLER[hparams.distill_mode](hparams=hparams,
distill_params=get_distill_params(hparams.distill_config),
teacher_model=teacher_model,
student_model=student_model,
task=task,
teacher_ckpt_dir=teacher_ckpt_dir,
teacher_log_dir=teacher_log_dir,
student_ckpt_dir=student_ckpt_dir,
student_log_dir=student_log_dir,
)
# Restore Models
distiller.restore_teacher()
distiller.restore_student()
# Run the distillation loop
distiller.distill_loop() | 5,174 | 47.820755 | 136 | py |
Reflect | Reflect-master/distill/distill_mnist.py | ''' Code to apply the distillation process for a teacher and a student model.
Run:
 python distill/distill_mnist.py \
--task=word_sv_agreement_vp \
--teacher_exp_name=small_lstm_v4_0.0001_withl2 \
--teacher_model=cl_lstm \
--teacher_config=small_lstm_v4 \
--student_exp_name=distilled0 \
--student_model=cl_gpt2 \
--student_config=small_gpt_v9 \
--distill_mode=offline
'''
from distill.distiller import Distiller
from distill.online_distiller import OnlineDistiller
from distill.scheduled_distiller import ScheduledDistiller
from util import constants
from util.config_util import get_distill_params
import os
from util.config_util import get_model_params, get_task_params, get_train_params
from absl import flags, logging
import sys
import tensorflow as tf
from util.models import MODELS
from util.tasks import TASKS
FLAGS = flags.FLAGS
flags.DEFINE_string('logdir', 'logs', 'log dir')
flags.DEFINE_string('chkpt_dir', 'tf_ckpts', 'checkpoint dir')
flags.DEFINE_string('task', 'word_sv_agreement_lm', 'sv_agreement_lm | word_sv_agreement_lm | word_sv_agreement_vp')
flags.DEFINE_string('distill_config', 'base', ' distillation hparams set')
flags.DEFINE_string('teacher_exp_name', 'trial4', 'experiment directory')
flags.DEFINE_string('teacher_model', 'lm_lstm', 'lm_lstm | lm_gpt2')
flags.DEFINE_string('student_exp_name', 'trial1', 'experiment directory')
flags.DEFINE_string('student_model', 'lm_lstm', 'lm_lstm | lm_gpt2')
flags.DEFINE_string('student_config', 'base', 'base | small_lstm ')
flags.DEFINE_string('teacher_config', 'base', 'base | small_lstm ')
flags.DEFINE_string('distill_mode', 'offline', 'offline | online | off_schdld | on_schdld')
flags.DEFINE_string('keep_checkpoint_every_n_hours',None, 'keep_checkpoint_every_n_hours passed to training manager')
FLAGS(sys.argv)
hparams = flags.FLAGS
def create_and_load_models():
teacher_model = MODELS[hparams.teacher_model](
hparams=get_model_params(task, hparams.teacher_model, hparams.teacher_config))
student_model = MODELS[hparams.student_model](
hparams=get_model_params(task, hparams.student_model, hparams.student_config))
teacher_log_dir = os.path.join(hparams.logdir, task.name,
'_'.join([hparams.distill_mode,hparams.distill_config,
"teacher",teacher_model.model_name,hparams.teacher_config,hparams.teacher_exp_name]))
teacher_ckpt_dir = os.path.join(hparams.chkpt_dir, task.name,
'_'.join([teacher_model.model_name, hparams.teacher_config,hparams.teacher_exp_name]))
student_log_dir = os.path.join(hparams.logdir, task.name,
'_'.join([hparams.distill_mode,hparams.distill_config,
"teacher", teacher_model.model_name, str(hparams.teacher_config), hparams.teacher_exp_name,
"student", student_model.model_name, str(hparams.student_config), hparams.student_exp_name]))
student_ckpt_dir = os.path.join(hparams.chkpt_dir, task.name,
'_'.join([hparams.distill_mode,hparams.distill_config,
"teacher", teacher_model.model_name, str(hparams.teacher_config), hparams.teacher_exp_name,
"student",student_model.model_name, str(hparams.student_config),hparams.student_exp_name]))
return teacher_model, student_model, teacher_log_dir, teacher_ckpt_dir, student_log_dir, student_ckpt_dir
DISTILLER = {'offline': Distiller,
'online': OnlineDistiller,
'off_schdld': ScheduledDistiller}
if __name__ == '__main__':
# Create task
task = TASKS[hparams.task](get_task_params())
# Create the Model
teacher_model, student_model, \
teacher_log_dir, teacher_ckpt_dir, student_log_dir, student_ckpt_dir = create_and_load_models()
distiller = DISTILLER[hparams.distill_mode](hparams=hparams,
distill_params=get_distill_params(hparams.distill_config),
teacher_model=teacher_model,
student_model=student_model,
task=task,
teacher_ckpt_dir=teacher_ckpt_dir,
teacher_log_dir=teacher_log_dir,
student_ckpt_dir=student_ckpt_dir,
student_log_dir=student_log_dir,
)
# Restore Models
distiller.restore_teacher()
distiller.restore_student()
# Run the distillation loop
distiller.distill_loop() | 4,778 | 47.272727 | 136 | py |
Reflect | Reflect-master/distill/__init__.py | 0 | 0 | 0 | py |
|
Reflect | Reflect-master/distill/distill_util.py | import tensorflow as tf
from tf2_models.metrics import distill_loss, sequence_distill_loss
@tf.function(experimental_relax_shapes=True)
def get_topk_mask(inputs, k):
  """Return a 0/1 mask with ones at the top-k entries along the last axis."""
  inputs_shape = tf.shape(inputs)
  inputs_shape = tf.cast(inputs_shape, dtype=tf.int64)
  # Indices of the k largest values along the last dimension.
  values, indices = tf.nn.top_k(inputs, k=k, sorted=False)
  indices = tf.cast(indices, dtype=tf.int64)
  k = tf.cast(k, dtype=tf.int64)
  # Build full N-d indices by combining the grid over the leading dimensions
  # with the top-k indices of the last dimension.
  temp_indices = tf.meshgrid(*[tf.range(d, dtype=tf.int64) for d in (tf.unstack(
    inputs_shape[:(inputs.get_shape().ndims - 1)]) + [k])], indexing='ij')
  temp_indices = tf.stack(temp_indices[:-1] + [indices], axis=-1)
  full_indices = tf.reshape(temp_indices, [-1, inputs.get_shape().ndims])
  values = tf.reshape(values, [-1])
  # Scatter ones at the top-k positions via a sparse tensor, then densify.
  mask_vals = tf.ones_like(values, dtype=tf.int64)
  full_indices = tf.cast(
    full_indices, dtype=tf.int64)
  mask_st = tf.SparseTensor(indices=full_indices, values=mask_vals, dense_shape=inputs_shape)
  mask = tf.sparse.to_dense(tf.sparse.reorder(mask_st))
  return mask
@tf.function(experimental_relax_shapes=True)
def get_topk_masked_probs(logits, labels, temperature, k=100, padding_symbol=0):
topk_mask = (1 - tf.cast(get_topk_mask(logits, k), dtype=tf.float32)) * -10e8
teacher_probs = tf.nn.softmax((logits + topk_mask) / temperature, axis=-1)
sequence_mask = tf.cast(labels != padding_symbol, dtype=tf.float32)
masked_teacher_probs = teacher_probs * sequence_mask[..., None] + tf.eye(tf.shape(teacher_probs)[-1])[0] * (
1 - sequence_mask[..., None])
return masked_teacher_probs
@tf.function(experimental_relax_shapes=True)
def get_masked_probs(logits, labels, temperature, padding_symbol=0):
teacher_probs = tf.nn.softmax(logits / temperature, axis=-1)
sequence_mask = tf.cast(labels != padding_symbol, dtype=tf.float32)
masked_teacher_probs = teacher_probs * sequence_mask[..., None] + tf.eye(tf.shape(teacher_probs)[-1])[0] * (
1 - sequence_mask[..., None])
return masked_teacher_probs
@tf.function(experimental_relax_shapes=True)
def get_probs(logits, labels, temperature):
teacher_probs = tf.nn.softmax(logits / temperature, axis=-1)
return teacher_probs
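
# Illustrative sketch (added for clarity, not part of the original code path):
# temperature softening of teacher logits. A higher temperature yields a smoother
# distribution, which is what the distillation losses below consume as soft targets.
# The logits are toy values.
def example_soft_targets():
  logits = tf.constant([[4.0, 1.0, -2.0]])
  sharp = get_probs(logits, labels=None, temperature=tf.constant(1.0))
  smooth = get_probs(logits, labels=None, temperature=tf.constant(5.0))
  return sharp, smooth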
class DistillLoss(tf.keras.losses.Loss):
def __init__(self, padding_symbol=0, tmp=1.0,
**kwargs):
super(DistillLoss, self).__init__(**kwargs)
self.tmp = tf.Variable(tmp, dtype=tf.float32, name="temp")
self.padding_symbol = tf.Variable(padding_symbol, dtype=tf.int64, name="padding_symbol")
def call(self, y_true, y_pred):
return distill_loss(y_true, y_pred, self.tmp)
class SequenceDistillLoss(tf.keras.losses.Loss):
def __init__(self, padding_symbol=0, tmp=1.0,
**kwargs):
super(SequenceDistillLoss, self).__init__(**kwargs)
self.tmp = tf.Variable(tmp, dtype=tf.float32, name="tmp")
self.padding_symbol = tf.Variable(padding_symbol, dtype=tf.int64, name="padding_symbol")
def call(self, y_true, y_pred):
return sequence_distill_loss(y_true, y_pred, self.padding_symbol, self.tmp)
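
# Hedged usage sketch (illustrative only; the actual loss math lives in
# tf2_models.metrics): the loss classes above take the teacher's temperature-softened
# probabilities as y_true and the raw student logits as y_pred, and the distillers mix
# them with the gold-label loss as
#   final_loss = distill_rate * distill_loss + (1 - distill_rate) * actual_loss.
def example_make_distill_losses(padding_symbol=0, temperature=2.0):
  token_level_loss = SequenceDistillLoss(padding_symbol=padding_symbol, tmp=temperature)
  sentence_level_loss = DistillLoss(padding_symbol=padding_symbol, tmp=temperature)
  return token_level_loss, sentence_level_loss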
def get_distill_scheduler(schedule, min=0.0, max=1.0, decay_steps=10000):
  if schedule == "exp":
    scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
      max,
      decay_steps=1000,
      decay_rate=0.96,
      staircase=True)
  elif schedule == 'crs':
    scheduler = tf.keras.experimental.CosineDecayRestarts(
      max,
      decay_steps,
      t_mul=2.0,
      m_mul=0.9,
      alpha=0.001,
    )
  elif schedule == 'lnr':
    # Linear decay from max to min over decay_steps.
    a = (max - min) / decay_steps
    scheduler = lambda x: max - a * x
  elif schedule == 'stp':
    # Step schedule: constant at max, then drop to min after decay_steps.
    scheduler = lambda x: max if x < decay_steps else min
  else:
    # Default: constant distillation rate.
    scheduler = lambda x: max
return scheduler | 3,653 | 35.54 | 110 | py |
Reflect | Reflect-master/distill/distiller.py | import tensorflow as tf
import os
from distill.distill_util import get_distill_scheduler
from tf2_models.train_utils import ExponentialDecayWithWarmpUp
from tf2_models.trainer import OPTIMIZER_DIC
import numpy as np
class Distiller(object):
''' Pipeline for offline distillation.
'''
def __init__(self, hparams, distill_params, teacher_model, student_model, task,
teacher_log_dir, student_log_dir, teacher_ckpt_dir, student_ckpt_dir):
self.teacher_model = teacher_model
self.student_model = student_model
self.task = task
self.distill_params = distill_params
self.temperature = tf.convert_to_tensor(distill_params.distill_temp)
self.distill_loss = self.task.get_distill_loss_fn(self.distill_params)
self.task_loss = self.task.get_loss_fn()
self.metrics = self.task.metrics()
self.task_probs_fn = self.task.get_probs_fn()
self.hparams = hparams
self.create_student_optimizer()
self.setup_ckp_and_summary(student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir)
self.setup_models(distill_params, task)
self.distillrate_scheduler = get_distill_scheduler(distill_params.distill_schedule,
min=distill_params.distill_min_rate,
max=distill_params.student_distill_rate)
def create_student_optimizer(self):
student_initial_learning_rate = self.distill_params.student_learning_rate
if 'crs' in self.distill_params.schedule:
lr_schedule = (
tf.keras.experimental.CosineDecayRestarts(
student_initial_learning_rate,
first_decay_steps=self.distill_params.student_decay_steps,
t_mul=5.0, #0.2
m_mul=self.distill_params.student_decay_rate,
alpha=0.001,
))
else:
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=student_initial_learning_rate,
decay_steps=self.distill_params.student_decay_steps,
decay_rate=self.distill_params.student_decay_rate,
warmup_steps=self.distill_params.student_warmup_steps,
hold_base_rate_steps=self.distill_params.student_hold_base_rate_steps)
self.student_optimizer = OPTIMIZER_DIC[self.distill_params.student_optimizer](
learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0)
def setup_ckp_and_summary(self, student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir):
# Init checkpoints
self.teacher_ckpt = tf.train.Checkpoint(net=self.teacher_model)
self.teacher_manager = tf.train.CheckpointManager(self.teacher_ckpt, teacher_ckpt_dir, max_to_keep=self.hparams.max_checkpoints)
self.student_ckpt = tf.train.Checkpoint(step=tf.Variable(1),
optimizer=self.student_optimizer,
net=self.student_model)
self.student_manager = tf.train.CheckpointManager(self.student_ckpt, student_ckpt_dir,
keep_checkpoint_every_n_hours=self.hparams.keep_checkpoint_every_n_hours,
max_to_keep=self.hparams.max_checkpoints)
# Init summary
student_summary_dir = os.path.join(student_log_dir, 'summaries')
tf.io.gfile.makedirs(student_log_dir)
self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(student_summary_dir, 'train'))
tf.compat.v2.summary.experimental.set_step(self.student_optimizer.iterations)
def setup_models(self, distill_params, task):
x, y = iter(self.task.valid_dataset).next()
self.student_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
self.student_model.summary()
self.teacher_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
self.teacher_model.summary()
self.student_model.compile(
optimizer=self.student_optimizer,
loss=self.task_loss,
metrics=[self.metrics])
self.teacher_model.compile(
loss=self.task_loss,
metrics=[self.metrics])
def restore_teacher(self):
''' Restore the teacher model from its checkpoint.
'''
self.teacher_ckpt.restore(self.teacher_manager.latest_checkpoint)
if self.teacher_manager.latest_checkpoint:
print("Restored teacher from {}".format(self.teacher_manager.latest_checkpoint))
else:
print("Initializing teacher from scratch.")
def restore_student(self):
''' Restore the student model from its checkpoint.
'''
self.student_ckpt.restore(self.student_manager.latest_checkpoint)
if self.student_manager.latest_checkpoint:
print("Restored student from {}".format(self.student_manager.latest_checkpoint))
else:
print("Initializing student from scratch.")
def save_student(self):
self.student_ckpt.step.assign_add(1)
save_path = self.student_manager.save()
tf.print("Saved student checkpoint", save_path)
def distill_loop(self):
''' Offline Distillation main loop.
'''
# logging.info('Distribute strategy: mirrored.')
# strategy = tf.distribute.MirroredStrategy()
# train_dataset = strategy.experimental_distribute_dataset(self.task.train_dataset)
# valid_dataset = strategy.experimental_distribute_dataset(self.task.valid_dataset)
@tf.function(experimental_relax_shapes=True)
def student_train_step(x, teacher_y, y_true):
      ''' Training step for the student model (this is the only training step for offline distillation).
      :param x: input
      :param teacher_y: temperature-softened teacher probabilities, used to compute the distill loss
      :param y_true: actual outputs, used to compute the actual task loss
      :return:
        distill_loss
        actual_loss
        student_distill_rate
      '''
student_distill_rate = self.distillrate_scheduler(self.student_optimizer.iterations)
student_gold_rate = 1 - student_distill_rate
with tf.GradientTape() as tape:
logits = self.student_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
distill_loss = self.distill_loss(y_pred=logits, y_true=teacher_y)
reg_loss = tf.math.add_n(self.student_model.losses)
actual_loss = self.task_loss(y_pred=logits, y_true=y_true)
final_loss = student_distill_rate * distill_loss + \
student_gold_rate * actual_loss + reg_loss
grads = tape.gradient(final_loss, self.student_model.trainable_weights)
self.student_model.optimizer.apply_gradients(zip(grads, self.student_model.trainable_weights),
name="student_optimizer")
return distill_loss, actual_loss, student_distill_rate
@tf.function
def epoch_loop():
step = 0
for x,y in self.task.train_dataset:
teacher_logits = self.teacher_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
teacher_probs = self.task_probs_fn(logits=teacher_logits, labels=y, temperature=self.temperature)
distill_loss, actual_loss, student_distill_rate = student_train_step(x=x, teacher_y=teacher_probs, y_true=y)
# Log every 200 batches.
if step % 200 == 0:
with tf.summary.experimental.summary_scope("student_train"):
tf.summary.scalar('student_learning_rate',
self.student_model.optimizer.learning_rate(self.student_model.optimizer.iterations),
)
tf.summary.scalar('fine_distill_loss',
distill_loss)
tf.summary.scalar('student_distill_rate',
student_distill_rate)
step += 1
# Stop at the end of the epoch
if (step % self.task.n_train_batches) == 0:
with tf.summary.experimental.summary_scope("student_train"):
tf.summary.scalar('distill_loss', distill_loss)
tf.summary.scalar('actual_loss', actual_loss)
break
@tf.function
def summarize(teacher_eval_results, student_eval_results):
with tf.summary.experimental.summary_scope("eval_teacher"):
for i, m_name in enumerate(self.teacher_model.metrics_names):
tf.summary.scalar(m_name, teacher_eval_results[i])
with tf.summary.experimental.summary_scope("eval_student"):
for i, m_name in enumerate(self.student_model.metrics_names):
tf.summary.scalar(m_name, student_eval_results[i])
with self.summary_writer.as_default():
for epoch in np.arange(self.distill_params.n_epochs):
epoch_loop()
# Evaluate Teacher
teacher_eval_results = self.teacher_model.evaluate(self.task.valid_dataset,
steps=self.task.n_valid_batches)
# Evaluate Student
student_eval_results = self.student_model.evaluate(self.task.valid_dataset,
steps=self.task.n_valid_batches)
summarize(teacher_eval_results, student_eval_results)
pow2 = [0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
if self.hparams.keep_some_checkpoints:
if (epoch in pow2) or (epoch == (self.distill_params.n_epochs - 1)):
self.save_student()
else:
self.save_student()
| 9,284 | 44.292683 | 132 | py |
Reflect | Reflect-master/tf2_models/embedding.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list
class SharedEmbeddings(tf.keras.layers.Layer):
"""Construct shared token embeddings.
"""
def __init__(self, vocab_size, hidden_size, initializer_range=None, regularizer=None, **kwargs):
super(SharedEmbeddings, self).__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range
self.regularizer = regularizer
def build(self, input_shape):
"""Build shared word embedding layer
Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
self.weight = self.add_weight(
"weight",
shape=[self.vocab_size, self.hidden_size],
initializer=get_initializer(self.initializer_range),
regularizer=self.regularizer)
super(SharedEmbeddings, self).build(input_shape)
def call(self, inputs, mode="embedding"):
"""Get token embeddings of inputs.
Args:
inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids)
mode: string, a valid value is one of "embedding" and "linear".
Returns:
outputs: (1) If mode == "embedding", output embedding tensor, float32 with
shape [batch_size, length, embedding_size]; (2) mode == "linear", output
linear tensor, float32 with shape [batch_size, length, vocab_size].
Raises:
ValueError: if mode is not valid.
Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
if mode == "embedding":
return self._embedding(inputs)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError("mode {} is not valid.".format(mode))
def _embedding(self, input_ids):
"""Applies embedding based on inputs tensor."""
return tf.gather(self.weight, input_ids)
def _linear(self, inputs):
"""Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [..., hidden_size]
Returns:
float32 tensor with shape [..., vocab_size].
"""
first_dims = shape_list(inputs)[:-1]
x = tf.reshape(inputs, [-1, self.hidden_size])
logits = tf.matmul(x, self.weight, transpose_b=True)
return tf.reshape(logits, first_dims + [self.vocab_size]) | 2,633 | 38.313433 | 137 | py |
Reflect | Reflect-master/tf2_models/lm_transformer.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list
from tf2_models.embedding import SharedEmbeddings
from tf2_models.transformer_layers import Block
from tf2_models.transformers import *
class LmGPT2(tf.keras.Model):
def __init__(self, hparams, scope='lm_gpt2', *inputs, **kwargs):
del kwargs['cl_token']
super(LmGPT2, self).__init__(hparams, *inputs, **kwargs)
self.scope = scope
self.rep_index = 1
self.rep_layer = None
self.model_name = '_'.join([self.scope,
'h-'+str(hparams.embedding_dim),
'd-'+str(hparams.depth),
'rdrop-'+str(hparams.resid_pdrop),
'adrop-' + str(hparams.attn_pdrop),
'indrop-'+str(hparams.embd_pdrop)])
self.create_vars(hparams)
@tf.function
def create_vars(self, hparams):
self.transformer = GPT2(hparams, name='transformer')
def call(self, inputs, padding_symbol=None, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.wte(hidden_states, mode="linear")
#outputs = (lm_logits,) + transformer_outputs[1:]
return lm_logits # lm_logits, presents, (all hidden_states), (attentions)
def detailed_call(self, inputs, padding_symbol=None, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.wte(hidden_states, mode="linear")
outputs = (lm_logits,) + transformer_outputs
return outputs # lm_logits, presents, (all hidden_states), (attentions)
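
# Illustrative sketch (added for clarity, not used by the models above): the LM head
# ties weights with the input embedding by calling the same SharedEmbeddings layer in
# "embedding" mode to embed tokens and in "linear" mode to project hidden states back
# to vocabulary logits. The sizes below are arbitrary toy values.
def example_tied_lm_head():
  wte = SharedEmbeddings(vocab_size=50, hidden_size=16)
  token_ids = tf.constant([[1, 2, 3]])
  hidden = wte(token_ids, mode="embedding")  # [1, 3, 16]
  logits = wte(hidden, mode="linear")        # [1, 3, 50]
  return logits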
class LmGPT2SharedWeights(LmGPT2):
def __init__(self, hparams, scope='lm_gpt2_shared_weights', *inputs, **kwargs):
super(LmGPT2SharedWeights, self).__init__(hparams, scope=scope, *inputs, **kwargs)
@tf.function
def create_vars(self, hparams):
self.transformer = GPT2SharedWeights(hparams, name='shared_transformer')
def call(self, inputs, padding_symbol=None, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.wte(hidden_states, mode="linear")
#outputs = (lm_logits,) + transformer_outputs[1:]
return lm_logits # lm_logits, presents, (all hidden_states), (attentions)
class ClassifierGPT2(tf.keras.Model):
def __init__(self, hparams, scope='cl_gpt2',*inputs, **kwargs):
self.cl_token = kwargs['cl_token']
del kwargs['cl_token']
super(ClassifierGPT2, self).__init__(hparams, *inputs, **kwargs)
self.rep_index = 2
self.rep_layer = None
self.scope = scope
self.hparams = hparams
self.model_name = '_'.join([self.scope,
'h-'+str(hparams.embedding_dim),
'd-'+str(hparams.depth),
'rdrop-'+str(hparams.resid_pdrop),
'adrop-' + str(hparams.attn_pdrop),
'indrop-'+str(hparams.embd_pdrop)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(**kwargs)
#@tf.function
def create_vars(self,**kwargs):
self.transformer = GPT2(self.hparams, name='transformer',
**kwargs)
self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels,
kernel_initializer=get_initializer(self.hparams.initializer_range),
name='e2c')
def call(self, inputs, padding_symbol=None, **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(batch_size, inputs, transformer_outputs):
mask = tf.cast(inputs != 0, dtype=tf.int32)
inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1
batch_indices = tf.range(batch_size)
indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
hidden_states = tf.gather_nd(transformer_outputs[0], indices)
cl_logits = self.e2c(hidden_states)
return cl_logits
# Add CL token:
batch_size = tf.shape(inputs)[0]
#cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1))
#cl_tokens = tf.tile(cl_token, (batch_size, 1))
#inputs = tf.concat([cl_tokens, inputs], axis=-1)
transformer_outputs = self.transformer(inputs, **kwargs)
cl_logits = _call(batch_size, inputs, transformer_outputs)
return cl_logits
def detailed_call(self, inputs, padding_symbol=None, **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(batch_size, inputs, transformer_outputs):
mask = tf.cast(inputs != 0, dtype=tf.int32)
inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1
batch_indices = tf.range(batch_size)
indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
hidden_states = tf.gather_nd(transformer_outputs[0], indices)
cl_logits = self.e2c(hidden_states)
return cl_logits, hidden_states
# Add CL token:
batch_size = tf.shape(inputs)[0]
#cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1))
#cl_tokens = tf.tile(cl_token, (batch_size, 1))
#inputs = tf.concat([cl_tokens, inputs], axis=-1)
transformer_outputs = self.transformer(inputs, **kwargs)
cl_logits, hidden_states = _call(batch_size, inputs, transformer_outputs)
outputs = (cl_logits, hidden_states) + transformer_outputs
return outputs
class ClassifierGPT2SharedWeights(ClassifierGPT2):
def __init__(self, hparams, scope='cl_gpt2_shared_weights', *inputs, **kwargs):
super(ClassifierGPT2SharedWeights, self).__init__(hparams, scope=scope, *inputs, **kwargs)
@tf.function
def create_vars(self):
self.transformer = GPT2SharedWeights(self.hparams, name='shared_transformer')
self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels,
kernel_initializer=get_initializer(self.hparams.initializer_range),
name='e2c')
class ClassifierBERT(tf.keras.Model):
def __init__(self, hparams, scope='cl_bert',*inputs, **kwargs):
self.cl_token = kwargs['cl_token']
del kwargs['cl_token']
super(ClassifierBERT, self).__init__(hparams, *inputs, **kwargs)
self.scope = scope
self.hparams = hparams
self.rep_index = 2
self.rep_layer = None
self.model_name = '_'.join([self.scope,
'h-'+str(hparams.embedding_dim),
'd-'+str(hparams.depth),
'rdrop-'+str(hparams.resid_pdrop),
'adrop-' + str(hparams.attn_pdrop),
'indrop-'+str(hparams.embd_pdrop)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(**kwargs)
#@tf.function
def create_vars(self,**kwargs):
self.transformer = Bert(self.hparams, name='transformer',
**kwargs)
self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels,
kernel_initializer=get_initializer(self.hparams.initializer_range),
name='e2c')
def call(self, inputs, padding_symbol=None, add_cls=True, **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(batch_size, inputs, transformer_outputs):
#mask = tf.cast(inputs != 0, dtype=tf.int32)
#inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1
#batch_indices = tf.range(batch_size)
#indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
hidden_states = transformer_outputs[0][:,0]#tf.gather_nd(transformer_outputs[0], indices)
cl_logits = self.e2c(hidden_states, **kwargs)
return cl_logits
# Add CL token:
batch_size = tf.shape(inputs)[0]
if add_cls:
cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1))
cl_tokens = tf.tile(cl_token, (batch_size, 1))
inputs = tf.concat([cl_tokens, inputs], axis=-1)
transformer_outputs = self.transformer(inputs, **kwargs)
cl_logits = _call(batch_size, inputs, transformer_outputs)
return cl_logits
def detailed_call(self, inputs, padding_symbol=None, add_cls=True, **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(batch_size, inputs, transformer_outputs):
hidden_states = transformer_outputs[0][:, 0]
cl_logits = self.e2c(hidden_states)
return cl_logits, hidden_states
# Add CL token:
batch_size = tf.shape(inputs)[0]
if add_cls:
cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1))
cl_tokens = tf.tile(cl_token, (batch_size, 1))
inputs = tf.concat([cl_tokens, inputs], axis=-1)
transformer_outputs = self.transformer(inputs, **kwargs)
cl_logits, hidden_states = _call(batch_size, inputs, transformer_outputs)
reps_start_index = 1 if add_cls else 0
outputs = (cl_logits, hidden_states, transformer_outputs[0][:,reps_start_index:,:]) + transformer_outputs
return outputs
def get_input_embeddings(self, inputs, add_cls=True, **kwargs):
# Add CL token:
batch_size = tf.shape(inputs)[0]
if add_cls:
cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1, 1))
cl_tokens = tf.tile(cl_token, (batch_size, 1))
inputs = tf.concat([cl_tokens, inputs], axis=-1)
outputs = self.transformer.get_input_embeddings(inputs, **kwargs)
return outputs
def call_with_embeddings(self, input_embeddings, input_shape, padding_mask, past , **kwargs):
transformer_outputs = self.transformer.call_with_embeddings(input_embeddings=input_embeddings,
input_shape=input_shape, padding_mask=padding_mask,
past=past, **kwargs)
hidden_states = transformer_outputs[0][:, 0]
cl_logits = self.e2c(hidden_states)
return cl_logits, hidden_states
class ClassifierBERTSharedWeights(ClassifierBERT):
def __init__(self, hparams, scope='cl_bert_shared', *inputs, **kwargs):
super(ClassifierBERTSharedWeights, self).__init__(hparams, scope=scope, *inputs, **kwargs)
# @tf.function
def create_vars(self, **kwargs):
self.transformer = BertSharedWeights(self.hparams, name='transformer',
**kwargs)
self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels,
kernel_initializer=get_initializer(self.hparams.initializer_range),
name='e2c')
| 10,814 | 39.965909 | 109 | py |
Reflect | Reflect-master/tf2_models/ff.py | import tensorflow as tf
import numpy as np
class VanillaFF(tf.keras.models.Sequential):
def __init__(self, hparams, scope="cl_vff", *inputs, **kwargs):
if 'cl_token' in kwargs:
del kwargs['cl_token']
super(VanillaFF, self).__init__()
self.scope = scope
self.hparams = hparams
self.model_name = '_'.join([self.scope,
'h-' + '.'.join([str(x) for x in self.hparams.hidden_dim]),
'd-' + str(self.hparams.depth),
'hdrop-' + str(self.hparams.hidden_dropout_rate),
'indrop-' + str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00001,
l2=0.00001)
self.create_vars()
self.rep_index = 1
self.rep_layer = -1
def create_vars(self):
self.flat = tf.keras.layers.Flatten()
# self.batch_norm = tf.keras.layers.BatchNormalization()
# self.batch_norm.trainable = True
self.indrop = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.activation = tf.keras.layers.Activation('relu')
self.hidden_layers = []
self.hidden_batch_norms = []
self.hidden_dropouts = []
for i in np.arange(self.hparams.depth):
self.hidden_layers.append(tf.keras.layers.Dense(self.hparams.hidden_dim[i],
activation=None, #'relu',
kernel_regularizer=self.regularizer))
self.hidden_batch_norms.append(tf.keras.layers.BatchNormalization())
self.hidden_batch_norms[i].trainable = True
self.hidden_dropouts.append(tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate))
self.final_dense = tf.keras.layers.Dense(self.hparams.output_dim,
kernel_regularizer=self.regularizer)
def call(self, inputs, padding_symbol=None, training=None, **kwargs):
x = self.flat(inputs, **kwargs)
# x = self.batch_norm(x, training=training, **kwargs)
x = self.indrop(x, training=training, **kwargs)
for i in np.arange(self.hparams.depth):
x = self.hidden_layers[i](x, training=training, **kwargs)
x = self.activation(x)
x = self.hidden_batch_norms[i](x, training=training, **kwargs)
x = self.hidden_dropouts[i](x, training=training, **kwargs)
logits = self.final_dense(x, training=training, **kwargs)
return logits
  def detailed_call(self, inputs, padding_symbol=None, training=None, **kwargs):
    layer_activations = []
    x = self.flat(inputs, **kwargs)
    x = self.indrop(x, training=training, **kwargs)
    layer_activations.append(x)
    for i in np.arange(self.hparams.depth):
      x = self.hidden_layers[i](x, training=training, **kwargs)
      x = self.activation(x)
      x = self.hidden_batch_norms[i](x, training=training, **kwargs)
      x = self.hidden_dropouts[i](x, training=training, **kwargs)
      layer_activations.append(x)
    # Penultimate activations, returned alongside the logits for representation probing.
    penultimate = x
    logits = self.final_dense(x, training=training, **kwargs)
    return logits, penultimate, layer_activations
| 3,116 | 36.107143 | 92 | py |
Reflect | Reflect-master/tf2_models/common_layers.py | import tensorflow as tf
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import nest
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
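
# Small illustrative check (added for clarity, not used elsewhere): the tanh
# approximation above stays close to x * Phi(x); e.g. gelu(0.0) is 0.0 and
# gelu(1.0) is roughly 0.84.
def example_gelu_values():
  x = tf.constant([-1.0, 0.0, 1.0])
  return gelu(x)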
def shape_list(x):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def get_initializer(initializer_range=0.02):
"""Creates a `tf.initializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
def make_variable_state_initializer(**kwargs):
  def variable_state_initializer(shape, batch_size, dtype, index):
    args = kwargs.copy()
    if args.get('name'):
      args['name'] = args['name'] + '_' + str(index)
    else:
      args['name'] = 'init_state_' + str(index)
    args['shape'] = shape
    args['dtype'] = dtype
    # Create a learnable initial state and tile it across the batch dimension.
    var = tf.compat.v1.get_variable(**args)
    var = tf.expand_dims(var, 0)
    var = tf.tile(var, tf.stack([batch_size] + [1] * len(shape)))
    var.set_shape([None] + tensor_shape.as_shape(shape).as_list())
    return var
  return variable_state_initializer
def get_initial_cell_state(cell, initializer, batch_size, dtype):
"""Return state tensor(s), initialized with initializer.
Args:
cell: RNNCell.
batch_size: int, float, or unit Tensor representing the batch size.
initializer: function with two arguments, shape and dtype, that
determines how the state is initialized.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size x state_size]` initialized
according to the initializer.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size x s]` for each s in `state_size`.
"""
state_size = cell.state_size
if nest.is_sequence(state_size):
state_size_flat = nest.flatten(state_size)
init_state_flat = [
initializer(s, batch_size, dtype, i)
for i, s in enumerate(state_size_flat)]
init_state = nest.pack_sequence_as(structure=state_size,
flat_sequence=init_state_flat)
else:
init_state_size = state_size
init_state = initializer(init_state_size, batch_size, dtype, None)
return init_state
def _generate_variable_state(batch_size_tensor, state_size, dtype):
"""Generate a variable tensor with shape [batch_size, state_size]."""
def create_variable(unnested_state_size):
flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
init_state_size = [batch_size_tensor] + flat_dims
return tf.Variable(init_state_size, dtype=dtype)
if nest.is_sequence(state_size):
return nest.map_structure(create_variable, state_size)
else:
return create_variable(state_size)
| 3,398 | 34.041237 | 72 | py |
Reflect | Reflect-master/tf2_models/lm_lstm.py | import absl
import tensorflow as tf
import numpy as np
from tensorboard.compat.tensorflow_stub import tensor_shape
from tensorflow.python.util import nest
from tf2_models.common_layers import get_initializer
from tf2_models.embedding import SharedEmbeddings
from tf2_models.utils import create_init_var
class LmLSTM(tf.keras.Model):
def __init__(self, hparams, scope="lm_lstm",*inputs, **kwargs):
del kwargs['cl_token']
super(LmLSTM, self).__init__(*inputs, **kwargs)
self.hparams = hparams
self.scope = scope
self.rep_index = 2
self.rep_layer = -1
self.model_name = '_'.join([self.scope,
'em-'+str(self.hparams.embedding_dim),
'h-'+str(self.hparams.hidden_dim),
'd-'+str(self.hparams.depth),
'hdrop-'+str(self.hparams.hidden_dropout_rate),
'indrop-'+str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.00001)
self.create_vars()
@tf.function
def create_vars(self):
self.input_embedding = tf.compat.v2.keras.layers.Embedding(input_dim=self.hparams.input_dim,
output_dim=self.hparams.embedding_dim,
input_shape=(None, None),
mask_zero=True,
embeddings_regularizer=self.regularizer,
name='input_embedding')
self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
self.output_embedding = tf.compat.v2.keras.layers.Dense(units=self.hparams.output_dim,
kernel_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
name='output_projection')
self.stacked_rnns = []
for _ in np.arange(self.hparams.depth):
self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
recurrent_dropout=self.hparams.hidden_dropout_rate,
dropout=self.hparams.hidden_dropout_rate,
kernel_regularizer=self.regularizer,
recurrent_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
))
@tf.function(experimental_relax_shapes=True)
def call(self, inputs, **kwargs):
if 'training' in kwargs:
training = kwargs['training']
else:
training = False
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs),training=training)
rnn_outputs = embedded_input
input_mask = self.input_embedding.compute_mask(inputs)
float_input_mask = tf.cast(input_mask, dtype=tf.float32)
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, training=training)
rnn_outputs = self.output_embedding_dropout(rnn_outputs, training=training)
logits = self.output_embedding(rnn_outputs)
logits = logits * float_input_mask[...,None] + tf.eye(self.hparams.output_dim)[0] * (1 - float_input_mask[...,None])
return logits
class ClassifierLSTM(tf.keras.Model):
def __init__(self, hparams, scope="cl_lstm", *inputs, **kwargs):
del kwargs['cl_token']
super(ClassifierLSTM, self).__init__(*inputs, **kwargs)
self.hparams = hparams
self.scope = scope
self.rep_index = 2
self.rep_layer = -1
self.model_name = '_'.join([self.scope,
'em-'+str(self.hparams.embedding_dim),
'h-'+str(self.hparams.hidden_dim),
'd-'+str(self.hparams.depth),
'hdrop-'+str(self.hparams.hidden_dropout_rate),
'indrop-'+str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.00001)
self.create_vars()
@tf.function
def create_vars(self):
self.input_embedding = tf.compat.v2.keras.layers.Embedding(input_dim=self.hparams.input_dim,
output_dim=self.hparams.embedding_dim,
input_shape=(None, None),
mask_zero=True,
embeddings_regularizer=self.regularizer,
name='input_embedding')
self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
self.output_embedding = tf.compat.v2.keras.layers.Dense(units=self.hparams.output_dim,
kernel_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
name='output_projection')
self.stacked_rnns = []
for _ in np.arange(self.hparams.depth):
self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
recurrent_dropout=self.hparams.hidden_dropout_rate,
dropout=self.hparams.hidden_dropout_rate,
kernel_regularizer=self.regularizer,
recurrent_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
))
def call(self, inputs, **kwargs):
if 'training' in kwargs:
training = kwargs['training']
else:
training = False
@tf.function(experimental_relax_shapes=True)
def _call(inputs, training):
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs),training=training)
rnn_outputs = embedded_input
input_mask = self.input_embedding.compute_mask(inputs)
inputs_length = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1)
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, training=training)
rnn_outputs = self.output_embedding_dropout(rnn_outputs, training=training)
batch_size = tf.shape(rnn_outputs)[0]
      batch_indices = tf.expand_dims(tf.range(batch_size), 1)
      final_indexes = tf.concat([batch_indices, tf.expand_dims(tf.cast(inputs_length - 1, dtype=tf.int32), 1)], axis=-1)
final_rnn_outputs = tf.gather_nd(rnn_outputs, final_indexes)
logits = self.output_embedding(final_rnn_outputs)
return logits
return _call(inputs, training)
#@tf.function(experimental_relax_shapes=True)
def detailed_call(self, inputs, **kwargs):
if 'training' in kwargs:
training = kwargs['training']
else:
training = False
@tf.function(experimental_relax_shapes=True)
def _call(inputs, training):
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs), training=training)
rnn_outputs = embedded_input
input_mask = self.input_embedding.compute_mask(inputs)
inputs_length = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1)
hidden_activation = [embedded_input]
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, training=training)
hidden_activation.append(rnn_outputs)
rnn_outputs = self.output_embedding_dropout(rnn_outputs, training=training)
batch_size = tf.shape(rnn_outputs)[0]
      batch_indices = tf.expand_dims(tf.range(batch_size), 1)
      final_indexes = tf.concat([batch_indices, tf.expand_dims(tf.cast(inputs_length - 1, dtype=tf.int32), 1)], axis=-1)
final_rnn_outputs = tf.gather_nd(rnn_outputs, final_indexes)
logits = self.output_embedding(final_rnn_outputs)
out = logits
out = (out, final_rnn_outputs, hidden_activation)
return out
return _call(inputs, training)
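# The classifier above reads the representation of the last non-padded token
# with a gather_nd over [batch_index, sequence_length - 1]. Minimal sketch of
# that indexing trick on dummy data (values are placeholders):
#
#   outputs = tf.random.normal((2, 5, 3))            # [batch, time, features]
#   lengths = tf.constant([5, 3])                     # true length per example
#   batch_idx = tf.expand_dims(tf.range(2), 1)        # [[0], [1]]
#   idx = tf.concat([batch_idx, tf.expand_dims(lengths - 1, 1)], axis=-1)
#   last = tf.gather_nd(outputs, idx)                 # shape [2, 3]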
class LmLSTMSharedEmb(tf.keras.Model):
def __init__(self, hparams, scope="lm_lstm_shared_emb",*inputs, **kwargs):
del kwargs['cl_token']
super(LmLSTMSharedEmb, self).__init__()
self.rep_index = 3
self.rep_layer = -1
self.hparams = hparams
self.scope = scope
self.model_name = '_'.join([self.scope,
'em-'+str(self.hparams.embedding_dim),
'h-'+str(self.hparams.hidden_dim),
'd-'+str(self.hparams.depth),
'hdrop-'+str(self.hparams.hidden_dropout_rate),
'indrop-'+str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0000)
self.create_vars()
def create_vars(self):
@tf.function
def _create_vars():
self.input_embedding = SharedEmbeddings(vocab_size=self.hparams.input_dim,
hidden_size=self.hparams.embedding_dim,
initializer_range=self.hparams.initializer_range,
regularizer=self.regularizer,
name='embedding')
self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
initializer_range = self.hparams.embedding_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
self.output_projection = tf.keras.layers.Dense(units=self.hparams.embedding_dim,
kernel_initializer=get_initializer(initializer_range))
self.stacked_rnns = []
self.rnn_initial_states = []
for _ in np.arange(self.hparams.depth):
initializer_range = self.hparams.hidden_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
recurrent_dropout=self.hparams.hidden_dropout_rate,
dropout=self.hparams.hidden_dropout_rate,
kernel_regularizer=self.regularizer,
recurrent_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
kernel_initializer=get_initializer(initializer_range),
recurrent_initializer=get_initializer(initializer_range)
))
_create_vars()
initializer_range = self.hparams.hidden_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
for i in np.arange(self.hparams.depth):
state_size = self.stacked_rnns[i].cell.state_size
if nest.is_sequence(state_size):
init_state = nest.map_structure(lambda x: create_init_var(x, i, initializer_range), state_size)
else:
init_state = create_init_var(state_size, i, initializer_range)
self.rnn_initial_states.append(init_state)
def call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, padding_symbol, **kwargs):
input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool)
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'),
**kwargs)
rnn_outputs = embedded_input
for i in np.arange(self.hparams.depth):
batch_size_tensor = tf.shape(rnn_outputs)[0]
absl.logging.info(self.rnn_initial_states[i])
def tile_init(unnested_init_state):
return tf.tile(unnested_init_state, (batch_size_tensor, 1))
init_state = self.rnn_initial_states[i]
if nest.is_sequence(init_state):
init_for_batch = nest.map_structure(tile_init, init_state)
else:
init_for_batch = tile_init(init_state)
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask,
initial_state=init_for_batch,
**kwargs)
rnn_outputs = self.output_projection(rnn_outputs, **kwargs)
rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs)
logits = self.input_embedding(rnn_outputs, mode='linear')
return logits
return _call(inputs, padding_symbol, **kwargs)
@tf.function(experimental_relax_shapes=True)
def detailed_call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, padding_symbol, **kwargs):
input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool)
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'),
**kwargs)
rnn_outputs = embedded_input
hidden_activation = [embedded_input]
for i in np.arange(self.hparams.depth):
batch_size_tensor = tf.shape(rnn_outputs)[0]
absl.logging.info(self.rnn_initial_states[i])
def tile_init(unnested_init_state):
return tf.tile(unnested_init_state, (batch_size_tensor, 1))
init_state = self.rnn_initial_states[i]
if nest.is_sequence(init_state):
init_for_batch = nest.map_structure(tile_init, init_state)
else:
init_for_batch = tile_init(init_state)
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask,
initial_state=init_for_batch,
**kwargs)
hidden_activation.append(rnn_outputs)
rnn_outputs = self.output_projection(rnn_outputs, **kwargs)
rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs)
      inputs_lengths = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1) - 1
batch_indices = tf.range(batch_size_tensor)
indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
sentence_rep = tf.gather_nd(rnn_outputs, indices)
logits = self.input_embedding(rnn_outputs, mode='linear')
out = logits
out = (out,rnn_outputs, sentence_rep, hidden_activation)
return out
return _call(inputs, padding_symbol, **kwargs)
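# The shared-embedding language model above uses the same matrix to embed the
# inputs and, via mode='linear', to project hidden states back to vocabulary
# logits, so the output layer adds no extra parameters. Sketch of the
# weight-tying idea with a plain variable (all names are placeholders):
#
#   emb = tf.Variable(tf.random.normal((vocab_size, emb_dim)))
#   embedded = tf.nn.embedding_lookup(emb, token_ids)           # encode
#   logits = tf.matmul(hidden_states, emb, transpose_b=True)    # decode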
class LmLSTMSharedEmbV2(tf.keras.Model):
def __init__(self, hparams, scope="lm_lstm_shared_emb",*inputs, **kwargs):
del kwargs['cl_token']
super(LmLSTMSharedEmbV2, self).__init__()
self.rep_index = 3
self.rep_layer = -1
self.hparams = hparams
self.scope = scope
self.model_name = '_'.join([self.scope,
'em-'+str(self.hparams.embedding_dim),
'h-'+str(self.hparams.hidden_dim),
'd-'+str(self.hparams.depth),
'hdrop-'+str(self.hparams.hidden_dropout_rate),
'indrop-'+str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0000)
self.create_vars()
@tf.function
def create_vars(self):
self.input_embedding = SharedEmbeddings(vocab_size=self.hparams.input_dim,
hidden_size=self.hparams.embedding_dim,
initializer_range=self.hparams.initializer_range,
regularizer=self.regularizer,
name='embedding')
self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
initializer_range = self.hparams.embedding_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
self.output_projection = tf.keras.layers.Dense(units=self.hparams.embedding_dim,
kernel_initializer=get_initializer(initializer_range))
self.stacked_rnns = []
self.rnn_initial_states = []
for _ in np.arange(self.hparams.depth):
initializer_range = self.hparams.hidden_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
recurrent_dropout=self.hparams.hidden_dropout_rate,
dropout=self.hparams.hidden_dropout_rate,
kernel_regularizer=self.regularizer,
recurrent_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
kernel_initializer=get_initializer(initializer_range),
recurrent_initializer=get_initializer(initializer_range)
))
def call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, padding_symbol, **kwargs):
input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool)
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'),
**kwargs)
rnn_outputs = embedded_input
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask,
**kwargs)
rnn_outputs = self.output_projection(rnn_outputs, **kwargs)
rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs)
logits = self.input_embedding(rnn_outputs, mode='linear')
return logits
return _call(inputs, padding_symbol, **kwargs)
@tf.function(experimental_relax_shapes=True)
def detailed_call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, padding_symbol, **kwargs):
input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool)
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'),
**kwargs)
rnn_outputs = embedded_input
hidden_activation = [embedded_input]
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask,
**kwargs)
hidden_activation.append(rnn_outputs)
rnn_outputs = self.output_projection(rnn_outputs, **kwargs)
rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs)
batch_size_tensor = tf.shape(rnn_outputs)[0]
inputs_lengths = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1) - 1
batch_indices = tf.range(batch_size_tensor)
indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
sentence_rep = tf.gather_nd(rnn_outputs, indices)
logits = self.input_embedding(rnn_outputs, mode='linear')
out = logits
out = (out, rnn_outputs, sentence_rep, hidden_activation)
return out
return _call(inputs, padding_symbol, **kwargs)
if __name__ == '__main__':
  class hparams(object):
    hidden_dim=8
    input_dim=4
    output_dim=4
    embedding_dim=4
    depth=2
    hidden_dropout_rate=0.1
    input_dropout_rate=0.1
  # cl_token is removed from kwargs inside the constructor, so it must be passed.
  lm_lstm = LmLSTM(hparams=hparams, cl_token=None)
inputs = np.int64(np.flip(np.sort(np.random.uniform(0,3,size=(2,5)))))
inputs_mask = tf.equal(inputs, 0)
print(inputs_mask)
lm_lstm.build(input_shape=(None,None))
lm_lstm.summary()
print(inputs)
print(lm_lstm(inputs))
| 23,117 | 47.364017 | 138 | py |
Reflect | Reflect-master/tf2_models/transformers.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list
from tf2_models.embedding import SharedEmbeddings
from tf2_models.transformer_layers import Block
class GPT2(tf.keras.layers.Layer):
def __init__(self, hparams, *inputs, **kwargs):
    # tf.keras.layers.Layer.__init__ does not take an hparams argument; pass
    # only the generic layer arguments through.
    super(GPT2, self).__init__(*inputs, **kwargs)
self.output_hidden_states = hparams.output_hidden_states
self.output_attentions = hparams.output_attentions
self.output_embeddings = hparams.output_embeddings
self.num_hidden_layers = hparams.depth
self.vocab_size = hparams.vocab_size
self.embedding_dim = hparams.embedding_dim
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(hparams)
@tf.function
def create_vars(self, hparams):
self.wte = SharedEmbeddings(self.vocab_size,
hparams.hidden_size,
initializer_range=hparams.initializer_range,
regularizer=self.regularizer,
name='wte')
self.wpe = tf.keras.layers.Embedding(hparams.n_positions,
hparams.embedding_dim,
embeddings_initializer=get_initializer(hparams.initializer_range),
embeddings_regularizer=self.regularizer,
name='wpe')
self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop)
self.h = [Block(hparams.n_ctx,
hparams,
regularizer=self.regularizer,
scale=True,
name='h_._{}'.format(i)) for i in range(hparams.depth)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f')
def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None,
training=False):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, past, attention_mask, token_type_ids, position_ids,
training):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = shape_list(past[0][0])[-2]
if position_ids is None:
position_ids = tf.range(past_length, shape_list(inputs)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
padding_mask = tf.cast(tf.not_equal(inputs, tf.zeros_like(inputs))[:,tf.newaxis,:,tf.newaxis],
dtype=tf.float32)
if attention_mask is None:
attention_mask = padding_mask
else:
attention_mask = attention_mask*padding_mask
input_shape = shape_list(inputs)
input_ids = tf.reshape(inputs, [-1, input_shape[-1]])
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
inputs_embeds = self.wte(input_ids, mode='embedding')
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.wte(token_type_ids, mode='embedding')
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, layer_past, attention_mask], training=training)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
if self.output_embeddings:
outputs = outputs + (inputs_embeds,)
return outputs # last hidden state, presents, (all hidden_states), (attentions)
return _call(inputs, past, attention_mask, token_type_ids, position_ids,
training)
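# Example usage (illustrative sketch): GPT2 is a plain Keras layer driven by an
# hparams object. The attribute values below are placeholders, not the
# configuration used elsewhere in this repository.
#
#   from types import SimpleNamespace
#   hparams = SimpleNamespace(
#       output_hidden_states=False, output_attentions=False,
#       output_embeddings=False, depth=2, vocab_size=100,
#       embedding_dim=32, hidden_size=32, n_positions=64, n_ctx=64,
#       n_head=4, initializer_range=0.02, embd_pdrop=0.1,
#       attn_pdrop=0.1, resid_pdrop=0.1, layer_norm_epsilon=1e-5)
#   gpt2 = GPT2(hparams)
#   hidden_states, presents = gpt2(tf.constant([[1, 2, 3, 4]]))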
class GPT2SharedWeights(GPT2):
def __init__(self, hparams, *inputs, **kwargs):
super(GPT2SharedWeights, self).__init__(hparams, *inputs, **kwargs)
@tf.function
def create_vars(self, hparams):
self.wte = SharedEmbeddings(self.vocab_size ,
hparams.hidden_size,
initializer_range=hparams.initializer_range,
regularizer=self.regularizer,
name='wte')
self.wpe = tf.keras.layers.Embedding(hparams.n_positions,
hparams.embedding_dim,
embeddings_initializer=get_initializer(hparams.initializer_range),
embeddings_regularizer=self.regularizer,
name='wpe')
self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop)
attention_block = Block(hparams.n_ctx,
hparams,
regularizer=self.regularizer,
scale=True,
name='h')
self.h = [attention_block for i in range(hparams.depth)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f')
class Bert(tf.keras.layers.Layer):
def __init__(self, hparams, *inputs, **kwargs):
    # tf.keras.layers.Layer.__init__ does not take an hparams argument; pass
    # only the generic layer arguments through.
    super(Bert, self).__init__(*inputs, **kwargs)
self.output_hidden_states = hparams.output_hidden_states
self.output_attentions = hparams.output_attentions
self.output_embeddings = hparams.output_embeddings
self.num_hidden_layers = hparams.depth
self.vocab_size = hparams.vocab_size
self.embedding_dim = hparams.embedding_dim
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(hparams)
@tf.function
def create_vars(self, hparams):
self.wte = SharedEmbeddings(self.vocab_size,
hparams.hidden_size,
initializer_range=hparams.initializer_range,
regularizer=self.regularizer,
name='wte')
self.wpe = tf.keras.layers.Embedding(hparams.n_positions,
hparams.embedding_dim,
embeddings_initializer=get_initializer(hparams.initializer_range),
embeddings_regularizer=self.regularizer,
name='wpe')
self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop)
self.h = [Block(hparams.n_ctx,
hparams,
regularizer=self.regularizer,
scale=True,
casual_masking=False,
name='h_._{}'.format(i)) for i in range(hparams.depth)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f')
def get_input_embeddings(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None,
training=False):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = shape_list(past[0][0])[-2]
if position_ids is None:
position_ids = tf.range(past_length, shape_list(inputs)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
input_shape = shape_list(inputs)
input_ids = tf.reshape(inputs, [-1, input_shape[-1]])
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
inputs_embeds = self.wte(input_ids, mode='embedding')
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.wte(token_type_ids, mode='embedding')
else:
token_type_embeds = 0
input_embeddings = inputs_embeds + position_embeds + token_type_embeds
padding_mask = tf.cast(tf.not_equal(inputs, tf.zeros_like(inputs))[:, tf.newaxis, :, tf.newaxis],
dtype=tf.float32)
return input_embeddings, input_shape, padding_mask, past
def call_with_embeddings(self, input_embeddings, input_shape, padding_mask, past, attention_mask=None,
training=False):
@tf.function(experimental_relax_shapes=True)
def _call(input_embeddings, input_shape, padding_mask, past, attention_mask,
training):
if attention_mask is not None:
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
if attention_mask is None:
attention_mask = padding_mask
else:
attention_mask = attention_mask*padding_mask
hidden_states = input_embeddings
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, layer_past, attention_mask], training=training)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
      if self.output_embeddings:
        # Match the contract of `call`: append the input embeddings as the
        # final element of the output tuple.
        outputs = outputs + (input_embeddings,)
return outputs # last hidden state, presents, (all hidden_states), (attentions), input_embedding
return _call(input_embeddings, input_shape, padding_mask, past, attention_mask,
training)
def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None,
training=False):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, past, attention_mask, token_type_ids, position_ids,
training):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = shape_list(past[0][0])[-2]
if position_ids is None:
position_ids = tf.range(past_length, shape_list(inputs)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
padding_mask = tf.cast(tf.not_equal(inputs, tf.zeros_like(inputs))[:,tf.newaxis,:,tf.newaxis],
dtype=tf.float32)
if attention_mask is None:
attention_mask = padding_mask
else:
attention_mask = attention_mask*padding_mask
input_shape = shape_list(inputs)
input_ids = tf.reshape(inputs, [-1, input_shape[-1]])
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
inputs_embeds = self.wte(input_ids, mode='embedding')
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.wte(token_type_ids, mode='embedding')
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, layer_past, attention_mask], training=training)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
if self.output_embeddings:
outputs = outputs + (inputs_embeds,)
return outputs # last hidden state, presents, (all hidden_states), (attentions), input_embedding
return _call(inputs, past, attention_mask, token_type_ids, position_ids,
training)
class BertSharedWeights(Bert):
def __init__(self, hparams, *inputs, **kwargs):
super(BertSharedWeights, self).__init__(hparams, *inputs, **kwargs)
self.output_hidden_states = hparams.output_hidden_states
self.output_attentions = hparams.output_attentions
self.output_embeddings = hparams.output_embeddings
self.num_hidden_layers = hparams.depth
self.vocab_size = hparams.vocab_size
self.embedding_dim = hparams.embedding_dim
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(hparams)
@tf.function
def create_vars(self, hparams):
self.wte = SharedEmbeddings(self.vocab_size,
hparams.hidden_size,
initializer_range=hparams.initializer_range,
regularizer=self.regularizer,
name='wte')
self.wpe = tf.keras.layers.Embedding(hparams.n_positions,
hparams.embedding_dim,
embeddings_initializer=get_initializer(hparams.initializer_range),
embeddings_regularizer=self.regularizer,
name='wpe')
self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop)
attention_block = Block(hparams.n_ctx,
hparams,
regularizer=self.regularizer,
scale=True,
casual_masking=False,
name='h')
self.h = [attention_block for i in range(hparams.depth)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f')
| 16,938 | 40.619165 | 113 | py |
Reflect | Reflect-master/tf2_models/resnet.py | import tensorflow as tf
class ResnetBlock(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, activation='relu',*inputs, **kwargs):
super(ResnetBlock, self).__init__(*inputs, **kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.activation = activation
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_layer()
def create_layer(self):
self.conv1 = tf.keras.layers.Conv2D(self.filters, self.kernel_size,
activation=self.activation,
padding='same',
kernel_regularizer=self.regularizer)
self.batch_norm1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(self.filters, self.kernel_size,
activation=None,
padding='same',
kernel_regularizer=self.regularizer)
self.batch_norm2 = tf.keras.layers.BatchNormalization()
self.add = tf.keras.layers.Add()
self.activation = tf.keras.layers.Activation('relu')
def call(self, inputs, training=None, **kwargs):
outputs = self.conv1(inputs, training=training, **kwargs)
outputs = self.batch_norm1(outputs,training=training, **kwargs)
outputs = self.conv2(outputs, training=training, **kwargs)
outputs = self.batch_norm2(outputs,training=training, **kwargs)
outputs = self.add([outputs, inputs],training=training, **kwargs)
outputs = self.activation(outputs, training=training, **kwargs)
return outputs
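# Example usage (illustrative sketch): the residual block adds its input back
# onto the conv output, so the input must already have `filters` channels.
#
#   block = ResnetBlock(filters=64, kernel_size=3)
#   x = tf.random.normal((4, 32, 32, 64))    # channel count matches `filters`
#   y = block(x, training=False)              # same shape as x: (4, 32, 32, 64)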
class Resnet(tf.keras.Model):
def __init__(self, hparams, scope='resnet', *inputs, **kwargs):
if 'cl_token' in kwargs:
del kwargs['cl_token']
super(Resnet, self).__init__(name=scope, *inputs, **kwargs)
self.scope = scope
self.hparams = hparams
self.model_name = '_'.join([self.scope,
'h-' + str(self.hparams.hidden_dim),
'rd-' + str(self.hparams.num_res_net_blocks),
'hdrop-' + str(self.hparams.hidden_dropout_rate),
'indrop-' + str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_layers()
self.rep_index = 1
self.rep_layer = -1
def create_layers(self):
self.batch_norm1 = tf.keras.layers.BatchNormalization()
self.activation = tf.keras.layers.Activation('relu')
self.conv1 = tf.keras.layers.Conv2D(self.hparams.filters[0], self.hparams.kernel_size[0],
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm2 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(self.hparams.filters[1], self.hparams.kernel_size[1],
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm3 = tf.keras.layers.BatchNormalization()
self.pool2 = tf.keras.layers.MaxPooling2D(self.hparams.pool_size)
self.resblocks = []
for i in range(self.hparams.num_res_net_blocks):
self.resblocks.append(ResnetBlock(self.hparams.filters[2], self.hparams.kernel_size[2]))
self.conv4 = tf.keras.layers.Conv2D(self.hparams.filters[3], self.hparams.kernel_size[3],
activation=None)
self.batch_norm4 = tf.keras.layers.BatchNormalization()
self.avgpool = tf.keras.layers.GlobalAveragePooling2D()
self.dense = tf.keras.layers.Dense(self.hparams.hidden_dim, activation='relu')
self.dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
self.project = tf.keras.layers.Dense(self.hparams.output_dim, activation=None)
def call(self, inputs, padding_symbol=None, training=None, **kwargs):
x = inputs #self.batch_norm1(inputs, training=training, **kwargs)
x = self.conv1(x, training=training, **kwargs)
x = self.batch_norm2(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.conv2(x, training=training, **kwargs)
x = self.batch_norm3(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.pool2(x, training=training, **kwargs)
for i in range(self.hparams.num_res_net_blocks):
x = self.resblocks[i](x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
x = self.conv4(x, training=training, **kwargs)
x = self.batch_norm4(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.avgpool(x, training=training, **kwargs)
x = self.dense(x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
outputs = self.project(x, training=training, **kwargs)
return outputs
def detailed_call(self, inputs, padding_symbol=None, training=None, **kwargs):
self.layer_activations = []
x = self.batch_norm1(inputs, training=training, **kwargs)
x = self.conv1(x, training=training, **kwargs)
x = self.batch_norm2(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.conv2(x, training=training, **kwargs)
x = self.batch_norm3(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.pool2(x, training=training, **kwargs)
for i in range(self.hparams.num_res_net_blocks):
x = self.resblocks[i](x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.conv4(x, training=training, **kwargs)
x = self.batch_norm4(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.avgpool(x, training=training, **kwargs)
x = self.dense(x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
pnltimt = x
outputs = self.project(x, training=training, **kwargs)
return outputs, pnltimt, self.layer_activations | 6,572 | 40.601266 | 94 | py |
Reflect | Reflect-master/tf2_models/cnn.py | import tensorflow as tf
import numpy as np
def max_out(inputs, num_units, axis=None):
shape = inputs.get_shape().as_list()
if shape[0] is None:
shape[0] = -1
if axis is None: # Assume that channel is the last dimension
axis = -1
num_channels = shape[axis]
if num_channels % num_units:
raise ValueError('number of features({}) is not '
'a multiple of num_units({})'.format(num_channels,
num_units))
shape[axis] = num_units
shape += [num_channels // num_units]
outputs = tf.reduce_max(tf.reshape(inputs, shape), -1)
return outputs
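# Example (illustrative): maxout keeps `num_units` channels by taking the max
# over groups of num_channels / num_units consecutive features.
#
#   x = tf.reshape(tf.range(8, dtype=tf.float32), (1, 8))
#   max_out(x, num_units=4)    # -> [[1., 3., 5., 7.]], shape (1, 4)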
class VanillaCNN(tf.keras.models.Model):
def __init__(self, hparams, scope="cl_vcnn", *inputs, **kwargs):
if 'cl_token' in kwargs:
del kwargs['cl_token']
super(VanillaCNN, self).__init__(*inputs, **kwargs)
self.hparams = hparams
self.scope = scope
self.model_name = '_'.join([self.scope,
'hc-' + '.'.join(
[str(h) for h in self.hparams.filters]),
'hfc-' + '.'.join(
[str(h) for h in self.hparams.fc_dim]),
'd-' + str(self.hparams.depth),
'hdrop-' + str(
self.hparams.hidden_dropout_rate),
'indrop-' + str(
self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_vars()
self.rep_index = 1
self.rep_layer = -1
def create_vars(self):
self.indrop = tf.keras.layers.Dropout(rate=self.hparams.input_dropout_rate)
self.cnns = []
self.cnn_nns = []
self.cnn_bnz = []
self.cnn_activations = []
self.cnn_pooling = []
self.cnn_dropouts = []
for i in np.arange(self.hparams.depth):
self.cnns.append(tf.keras.layers.Conv2D(self.hparams.filters[i],
self.hparams.kernel_size[i],
activation=None,
kernel_regularizer=self.regularizer))
# if self.hparams.maxout_size[i] < self.hparams.filters[i]:
# nn_size = int(self.hparams.filters[i] / self.hparams.maxout_size[i])
# self.cnn_nns.append(tf.keras.layers.Conv2D(self.hparams.maxout_size[i],
# (1,1),
# activation=None,
# kernel_regularizer=self.regularizer))
# else:
# self.cnn_nns.append(tf.keras.layers.Lambda(lambda x: x))
self.cnn_bnz.append(tf.keras.layers.BatchNormalization())
self.cnn_activations.append(tf.keras.layers.Activation('relu'))
self.cnn_pooling.append(
tf.keras.layers.MaxPooling2D(self.hparams.pool_size[i]))
self.cnn_dropouts.append(
tf.keras.layers.Dropout(rate=self.hparams.hidden_dropout_rate))
self.avg_pool = tf.keras.layers.GlobalAveragePooling2D()
self.densez = []
self.dense_bnz = []
self.dense_activations = []
self.dense_dropouts = []
for i in np.arange(self.hparams.proj_depth):
self.densez.append(
tf.keras.layers.Dense(self.hparams.fc_dim[i], activation=None,
kernel_regularizer=self.regularizer))
self.dense_bnz.append(tf.keras.layers.BatchNormalization())
self.dense_activations.append(tf.keras.layers.Activation('relu'))
self.dense_dropouts.append(
tf.keras.layers.Dropout(rate=self.hparams.hidden_dropout_rate))
self.projector = tf.keras.layers.Dense(self.hparams.output_dim,
kernel_regularizer=self.regularizer)
def call(self, inputs, padding_symbol=None, training=None, **kwargs):
x = self.indrop(inputs, training=training, **kwargs)
for i in np.arange(self.hparams.depth):
x = self.cnns[i](x, training=training, **kwargs)
# x = self.cnn_nns[i](x, training=training, **kwargs)
x = max_out(x, self.hparams.maxout_size[i])
x = self.cnn_bnz[i](x, training=training, **kwargs)
x = self.cnn_activations[i](x, training=training, **kwargs)
x = self.cnn_pooling[i](x, training=training, **kwargs)
x = self.cnn_dropouts[i](x, training=training, **kwargs)
x = self.avg_pool(x, **kwargs)
for i in np.arange(self.hparams.proj_depth):
x = self.densez[i](x, training=training, **kwargs)
x = self.dense_bnz[i](x, training=training, **kwargs)
x = self.dense_activations[i](x, training=training, **kwargs)
x = self.dense_dropouts[i](x, training=training, **kwargs)
logits = self.projector(x, training=training, **kwargs)
return logits
def detailed_call(self, inputs, padding_symbol=None, **kwargs):
x = self.indrop(inputs)
hidden_activations = []
for i in np.arange(self.hparams.depth):
x = self.cnns[i](x, **kwargs)
x = max_out(x, self.hparams.maxout_size[i])
x = self.cnn_bnz[i](x, **kwargs)
x = self.cnn_activations[i](x, **kwargs)
x = self.cnn_pooling[i](x, **kwargs)
x = self.cnn_dropouts[i](x, **kwargs)
hidden_activations.append(x)
x = self.avg_pool(x, **kwargs)
hidden_activations.append(x)
for i in np.arange(self.hparams.proj_depth):
x = self.densez[i](x, **kwargs)
x = self.dense_bnz[i](x, **kwargs)
x = self.dense_activations[i](x, **kwargs)
x = self.dense_dropouts[i](x, **kwargs)
hidden_activations.append(x)
logits = self.projector(x, **kwargs)
return logits, hidden_activations[-1], hidden_activations
| 5,878 | 38.993197 | 91 | py |
Reflect | Reflect-master/tf2_models/utils.py | import tensorflow as tf
import re
from tensorboard.compat.tensorflow_stub import tensor_shape
def camel2snake(name):
return name[0].lower() + re.sub(r'(?!^)[A-Z]', lambda x: '_' + x.group(0).lower(), name[1:])
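# Examples (illustrative):
#   camel2snake('ResnetBlock')  -> 'resnet_block'
#   camel2snake('VanillaCNN')   -> 'vanilla_c_n_n'
#   camel2snake('LmLSTM')       -> 'lm_l_s_t_m'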
def log_summary(log_value, log_name, summary_scope):
"""Produce scalar summaries."""
with tf.compat.v2.summary.experimental.summary_scope(summary_scope):
tf.summary.scalar(log_name, log_value)
def create_init_var(unnested_state_size, i, initializer_range):
flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
init_state_size = [1] + flat_dims
return tf.Variable(shape=init_state_size, dtype=tf.float32,
initial_value=tf.keras.initializers.TruncatedNormal(stddev=initializer_range)(
shape=init_state_size),
trainable=True,
name="lstm_init_" + str(i))
| 884 | 31.777778 | 99 | py |
Reflect | Reflect-master/tf2_models/train_utils.py | import absl
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2.learning_rate_schedule import LearningRateSchedule
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
from tensorflow_addons.utils import keras_utils
@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecayWithWarmpUp(LearningRateSchedule):
"""A LearningRateSchedule that uses an exponential decay schedule."""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
warmup_steps,
warmup_learning_rate=0.0,
hold_base_rate_steps=0,
staircase=False,
name=None):
"""Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies an exponential decay function
to an optimizer step, given a provided initial learning rate.
    The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate * decay_rate ^ (step / decay_steps)
```
If the argument `staircase` is `True`, then `step / decay_steps` is
an integer division and the decayed learning rate follows a
staircase function.
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: When fitting a Keras model, decay every 100000 steps with a base
of 0.96:
```python
initial_learning_rate = 0.1
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
      warmup_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Number of steps over which the learning rate increases linearly from
        `warmup_learning_rate` to `initial_learning_rate`.
      warmup_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number.  The learning rate at the start of warmup.
      hold_base_rate_steps: A scalar `int32` or `int64` `Tensor` or a Python
        number.  Number of steps to hold `initial_learning_rate` after warmup
        before the exponential decay begins.
      staircase: Boolean.  If `True`, decay the learning rate at discrete
        intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
super(ExponentialDecayWithWarmpUp, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.warmup_steps = warmup_steps
self.warmup_learning_rate = warmup_learning_rate
self.hold_base_rate_steps = hold_base_rate_steps
self.staircase = staircase
self.name = name
@tf.function(experimental_relax_shapes=True)
def __call__(self, step):
with ops.name_scope_v2(self.name or "ExponentialDecay") as name:
initial_learning_rate = tf.constant(
self.initial_learning_rate, name="initial_learning_rate", dtype=tf.float32)
warmup_learning_rate = tf.constant(
self.warmup_learning_rate, name="warmup_learning_rate", dtype=tf.float32)
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
warmup_steps = math_ops.cast(self.warmup_steps, dtype)
hold_base_rate_steps = math_ops.cast(self.hold_base_rate_steps, dtype)
global_step_recomp = math_ops.cast(step, dtype)
p = (global_step_recomp - (warmup_steps+hold_base_rate_steps)) / (decay_steps)
if self.staircase:
p = math_ops.floor(p)
learning_rate= math_ops.multiply(
initial_learning_rate, math_ops.pow(decay_rate, p), name=name)
learning_rate = tf.where(
global_step_recomp > (warmup_steps + hold_base_rate_steps),
learning_rate, initial_learning_rate)
if self.warmup_steps > 0:
if self.initial_learning_rate < self.warmup_learning_rate:
          raise ValueError('initial_learning_rate must be larger than or '
                           'equal to warmup_learning_rate.')
slope = (initial_learning_rate - warmup_learning_rate) / warmup_steps
warmup_rate = slope * tf.cast(global_step_recomp,
tf.float32) + warmup_learning_rate
learning_rate = tf.where(global_step_recomp < warmup_steps, warmup_rate,
learning_rate)
return learning_rate
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"warmup_steps": self.warmup_steps,
"warmup_learning_rate": self.warmup_learning_rate,
"hold_base_rate_steps": self.hold_base_rate_steps,
"name": self.name
}
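# Example usage (illustrative sketch; the step counts are placeholders): linear
# warmup for 1000 steps, hold the base rate for 500 steps, then exponential
# decay with period 10000.
#
#   lr_schedule = ExponentialDecayWithWarmpUp(
#       initial_learning_rate=1e-3, decay_steps=10000, decay_rate=0.96,
#       warmup_steps=1000, warmup_learning_rate=1e-5, hold_base_rate_steps=500)
#   optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)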
class RectifiedAdam(tf.keras.optimizers.Optimizer):
"""Variant of the Adam optimizer whose adaptive learning rate is rectified
so as to have a consistent variance.
It implements the Rectified Adam (a.k.a. RAdam) proposed by
Liyuan Liu et al. in [On The Variance Of The Adaptive Learning Rate
And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf).
Example of usage:
```python
opt = tfa.optimizers.RectifiedAdam(lr=1e-3)
```
Note: `amsgrad` is not described in the original paper. Use it with
caution.
  RAdam is not a replacement for the heuristic warmup; the settings should be
  kept if warmup has already been employed and tuned in the baseline method.
You can enable warmup by setting `total_steps` and `warmup_proportion`:
```python
opt = tfa.optimizers.RectifiedAdam(
lr=1e-3,
total_steps=10000,
warmup_proportion=0.1,
min_lr=1e-5,
)
```
In the above example, the learning rate will increase linearly
from 0 to `lr` in 1000 steps, then decrease linearly from `lr` to `min_lr`
in 9000 steps.
Lookahead, proposed by Michael R. Zhang et.al in the paper
[Lookahead Optimizer: k steps forward, 1 step back]
(https://arxiv.org/abs/1907.08610v1), can be integrated with RAdam,
which is announced by Less Wright and the new combined optimizer can also
be called "Ranger". The mechanism can be enabled by using the lookahead
wrapper. For example:
```python
radam = tfa.optimizers.RectifiedAdam()
ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
```
"""
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=0.,
amsgrad=False,
sma_threshold=5.0,
total_steps=0,
warmup_proportion=0.1,
min_lr=0.,
name='RectifiedAdam',
**kwargs):
r"""Construct a new RAdam optimizer.
Args:
learning_rate: A Tensor or a floating point value.
The learning rate.
beta_1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
weight_decay: A floating point value. Weight decay for each param.
amsgrad: boolean. Whether to apply AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
beyond".
sma_threshold. A float value.
The threshold for simple mean average.
total_steps: An integer. Total number of training steps.
Enable warmup by setting a positive value.
warmup_proportion: A floating point value.
The proportion of increasing steps.
min_lr: A floating point value. Minimum learning rate after warmup.
name: Optional name for the operations created when applying
gradients. Defaults to "RectifiedAdam".
**kwargs: keyword arguments. Allowed to be {`clipnorm`,
`clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
by norm; `clipvalue` is clip gradients by value, `decay` is
included for backward compatibility to allow time inverse
decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super(RectifiedAdam, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self._set_hyper('decay', self._initial_decay)
self._set_hyper('weight_decay', weight_decay)
self._set_hyper('sma_threshold', sma_threshold)
self._set_hyper('total_steps', float(total_steps))
self._set_hyper('warmup_proportion', warmup_proportion)
self._set_hyper('min_lr', min_lr)
self.epsilon = epsilon or tf.keras.backend.epsilon()
self.amsgrad = amsgrad
self._initial_weight_decay = weight_decay
self._initial_total_steps = total_steps
def _create_slots(self, var_list):
for var in var_list:
self.add_slot(var, 'm')
for var in var_list:
self.add_slot(var, 'v')
if self.amsgrad:
for var in var_list:
self.add_slot(var, 'vhat')
def set_weights(self, weights):
params = self.weights
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[:len(params)]
super(RectifiedAdam, self).set_weights(weights)
def _resource_apply_dense(self, grad, var):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta_1_t = self._get_hyper('beta_1', var_dtype)
beta_2_t = self._get_hyper('beta_2', var_dtype)
epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
local_step = tf.cast(self.iterations + 1, var_dtype)
beta_1_power = tf.pow(beta_1_t, local_step)
beta_2_power = tf.pow(beta_2_t, local_step)
if self._initial_total_steps > 0:
total_steps = self._get_hyper('total_steps', var_dtype)
warmup_steps = total_steps *\
self._get_hyper('warmup_proportion', var_dtype)
min_lr = self._get_hyper('min_lr', var_dtype)
decay_steps = tf.maximum(total_steps - warmup_steps, 1)
decay_rate = (min_lr - lr_t) / decay_steps
lr_t = tf.where(
local_step <= warmup_steps,
lr_t * (local_step / warmup_steps),
lr_t + decay_rate * tf.minimum(local_step - warmup_steps,
decay_steps),
)
sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0
sma_t = sma_inf - 2.0 * local_step * beta_2_power / (
1.0 - beta_2_power)
m_t = m.assign(
beta_1_t * m + (1.0 - beta_1_t) * grad,
use_locking=self._use_locking)
m_corr_t = m_t / (1.0 - beta_1_power)
v_t = v.assign(
beta_2_t * v + (1.0 - beta_2_t) * tf.square(grad),
use_locking=self._use_locking)
if self.amsgrad:
vhat = self.get_slot(var, 'vhat')
vhat_t = vhat.assign(
tf.maximum(vhat, v_t), use_locking=self._use_locking)
v_corr_t = tf.sqrt(vhat_t / (1.0 - beta_2_power))
else:
vhat_t = None
v_corr_t = tf.sqrt(v_t / (1.0 - beta_2_power))
r_t = tf.sqrt((sma_t - 4.0) / (sma_inf - 4.0) * (sma_t - 2.0) /
(sma_inf - 2.0) * sma_inf / sma_t)
sma_threshold = self._get_hyper('sma_threshold', var_dtype)
var_t = tf.where(sma_t >= sma_threshold,
r_t * m_corr_t / (v_corr_t + epsilon_t), m_corr_t)
if self._initial_weight_decay > 0.0:
var_t += self._get_hyper('weight_decay', var_dtype) * var
var_update = var.assign_sub(
lr_t * var_t, use_locking=self._use_locking)
updates = [var_update, m_t, v_t]
if self.amsgrad:
updates.append(vhat_t)
return tf.group(*updates)
def _resource_apply_sparse(self, grad, var, indices):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
beta_1_t = self._get_hyper('beta_1', var_dtype)
beta_2_t = self._get_hyper('beta_2', var_dtype)
epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
local_step = tf.cast(self.iterations + 1, var_dtype)
beta_1_power = tf.pow(beta_1_t, local_step)
beta_2_power = tf.pow(beta_2_t, local_step)
if self._initial_total_steps > 0:
total_steps = self._get_hyper('total_steps', var_dtype)
warmup_steps = total_steps *\
self._get_hyper('warmup_proportion', var_dtype)
min_lr = self._get_hyper('min_lr', var_dtype)
decay_steps = tf.maximum(total_steps - warmup_steps, 1)
decay_rate = (min_lr - lr_t) / decay_steps
lr_t = tf.where(
local_step <= warmup_steps,
lr_t * (local_step / warmup_steps),
lr_t + decay_rate * tf.minimum(local_step - warmup_steps,
decay_steps),
)
sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0
sma_t = sma_inf - 2.0 * local_step * beta_2_power / (
1.0 - beta_2_power)
m = self.get_slot(var, 'm')
m_scaled_g_values = grad * (1 - beta_1_t)
m_t = m.assign(m * beta_1_t, use_locking=self._use_locking)
with tf.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
m_corr_t = m_t / (1.0 - beta_1_power)
v = self.get_slot(var, 'v')
v_scaled_g_values = (grad * grad) * (1 - beta_2_t)
v_t = v.assign(v * beta_2_t, use_locking=self._use_locking)
with tf.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
if self.amsgrad:
vhat = self.get_slot(var, 'vhat')
vhat_t = vhat.assign(
tf.maximum(vhat, v_t), use_locking=self._use_locking)
v_corr_t = tf.sqrt(vhat_t / (1.0 - beta_2_power))
else:
vhat_t = None
v_corr_t = tf.sqrt(v_t / (1.0 - beta_2_power))
r_t = tf.sqrt((sma_t - 4.0) / (sma_inf - 4.0) * (sma_t - 2.0) /
(sma_inf - 2.0) * sma_inf / sma_t)
sma_threshold = self._get_hyper('sma_threshold', var_dtype)
var_t = tf.where(sma_t >= sma_threshold,
r_t * m_corr_t / (v_corr_t + epsilon_t), m_corr_t)
if self._initial_weight_decay > 0.0:
var_t += self._get_hyper('weight_decay', var_dtype) * var
with tf.control_dependencies([var_t]):
var_update = self._resource_scatter_add(
var, indices, tf.gather(-lr_t * var_t, indices))
updates = [var_update, m_t, v_t]
if self.amsgrad:
updates.append(vhat_t)
return tf.group(*updates)
def get_config(self):
config = super(RectifiedAdam, self).get_config()
config.update({
'learning_rate':
self._serialize_hyperparameter('learning_rate'),
'beta_1':
self._serialize_hyperparameter('beta_1'),
'beta_2':
self._serialize_hyperparameter('beta_2'),
'decay':
self._serialize_hyperparameter('decay'),
'weight_decay':
self._serialize_hyperparameter('weight_decay'),
'sma_threshold':
self._serialize_hyperparameter('sma_threshold'),
'epsilon':
self.epsilon,
'amsgrad':
self.amsgrad,
'total_steps':
self._serialize_hyperparameter('total_steps'),
'warmup_proportion':
self._serialize_hyperparameter('warmup_proportion'),
'min_lr':
self._serialize_hyperparameter('min_lr'),
})
return config | 17,416 | 40.568019 | 92 | py |
Reflect | Reflect-master/tf2_models/transformer_layers.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list, gelu
class Attention(tf.keras.layers.Layer):
def __init__(self, hidden_dim, n_ctx, config, regularizer, casual_masking=True, scale=False, **kwargs):
super(Attention, self).__init__(**kwargs)
self.output_attentions = config.output_attentions
self.casual_masking = casual_masking
n_state = hidden_dim
assert n_state % config.n_head == 0
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.regularizer = regularizer
self.c_attn = Conv1D(nf=n_state * 3, nx=hidden_dim,
initializer_range=config.initializer_range,
regularizer=self.regularizer, name='c_attn')
self.c_proj = Conv1D(nf=n_state, nx=hidden_dim,
initializer_range=config.initializer_range,
regularizer=self.regularizer,
name='c_proj')
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:, None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
def _attn(self, inputs, training=False):
q, k, v, attention_mask = inputs
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
if self.casual_masking:
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
outputs = [tf.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, inputs, training=False):
x, layer_past, attention_mask = inputs
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = tf.unstack(layer_past, axis=1)
key = tf.concat([past_key, key], axis=-2)
value = tf.concat([past_value, value], axis=-2)
present = tf.stack([key, value], axis=1)
attn_outputs = self._attn([query, key, value, attention_mask], training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
class Conv1D(tf.keras.layers.Layer):
def __init__(self, nf, nx, regularizer, initializer_range=0.02, **kwargs):
""" TFConv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
Basically works like a Linear layer but the weights are transposed
"""
super(Conv1D, self).__init__(**kwargs)
self.nf = nf
self.nx = nx
self.initializer_range = initializer_range
self.regularizer = regularizer
def build(self, input_shape):
self.weight = self.add_weight(
"weight",
shape=[self.nx, self.nf],
initializer=get_initializer(self.initializer_range),
regularizer=self.regularizer)
self.bias = self.add_weight(
"bias",
shape=[1, self.nf],
initializer=tf.zeros_initializer(),
regularizer=self.regularizer)
def call(self, x, **kwargs):
bz, sl = shape_list(x)[:2]
x = tf.reshape(x, [-1, self.nx])
x = tf.matmul(x, self.weight) + self.bias
x = tf.reshape(x, [bz, sl, self.nf])
return x
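# Illustrative usage sketch (not part of the original file): Conv1D acts as a
# position-wise Dense layer, mapping [batch, seq_len, nx] -> [batch, seq_len, nf].
# The toy shapes and the None regularizer below are assumptions for the example only.
def _conv1d_shape_example():
  conv = Conv1D(nf=32, nx=16, regularizer=None)
  x = tf.random.uniform((2, 5, 16))  # [batch, seq_len, nx]
  y = conv(x)                        # [batch, seq_len, nf]
  return y.shape                     # TensorShape([2, 5, 32])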
class Block(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, regularizer, casual_masking=True, scale=False, **kwargs):
super(Block, self).__init__(**kwargs)
self.regularizer = regularizer
nx = config.embedding_dim
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1')
self.attn = Attention(hidden_dim=nx, n_ctx=n_ctx, config=config, scale=scale,
regularizer=self.regularizer,
casual_masking=casual_masking, name='attn')
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2')
self.mlp = TransformerMLP(4 * nx, config, regularizer=self.regularizer, name='mlp')
def call(self, inputs, training=False):
x, layer_past, attention_mask = inputs
a = self.ln_1(x)
output_attn = self.attn([a, layer_past, attention_mask], training=training)
a = output_attn[0] # output_attn: a, present, (attentions)
x = x + a
m = self.ln_2(x)
m = self.mlp(m, training=training)
x = x + m
outputs = [x] + output_attn[1:]
return outputs # x, present, (attentions)
class TransformerMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, regularizer, **kwargs):
super(TransformerMLP, self).__init__(**kwargs)
self.regularizer = regularizer
nx = config.embedding_dim
self.c_fc = Conv1D(n_state, nx, initializer_range=config.initializer_range,
regularizer=self.regularizer, name='c_fc')
self.c_proj = Conv1D(nx, n_state, initializer_range=config.initializer_range,
regularizer=self.regularizer, name='c_proj')
self.act = gelu
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2 | 6,560 | 35.049451 | 105 | py |
Reflect | Reflect-master/tf2_models/ff_resnet.py | import tensorflow as tf
class FFResnetBlock(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, activation='relu',*inputs, **kwargs):
super(FFResnetBlock, self).__init__(*inputs, **kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.activation = activation
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_layer()
def create_layer(self):
self.conv1 = tf.keras.layers.Dense(self.filters*9,
activation=self.activation,
kernel_regularizer=self.regularizer)
self.batch_norm1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Dense(self.filters*9,
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm2 = tf.keras.layers.BatchNormalization()
self.add = tf.keras.layers.Add()
self.activation = tf.keras.layers.Activation('relu')
def call(self, inputs, training=None, **kwargs):
outputs = self.conv1(inputs, training=training, **kwargs)
outputs = self.batch_norm1(outputs,training=training, **kwargs)
outputs = self.conv2(outputs, training=training, **kwargs)
outputs = self.batch_norm2(outputs,training=training, **kwargs)
outputs = self.add([outputs, inputs],training=training, **kwargs)
outputs = self.activation(outputs, training=training, **kwargs)
return outputs
class FFResnet(tf.keras.Model):
def __init__(self, hparams, scope='ff_resnet', *inputs, **kwargs):
if 'cl_token' in kwargs:
del kwargs['cl_token']
super(FFResnet, self).__init__(name=scope, *inputs, **kwargs)
self.scope = scope
self.hparams = hparams
self.model_name = '_'.join([self.scope,
'h-' + str(self.hparams.hidden_dim),
'rd-' + str(self.hparams.num_res_net_blocks),
'hdrop-' + str(self.hparams.hidden_dropout_rate),
'indrop-' + str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_layers()
self.rep_index = 1
self.rep_layer = -1
def create_layers(self):
self.flat = tf.keras.layers.Flatten()
self.batch_norm1 = tf.keras.layers.BatchNormalization()
self.activation = tf.keras.layers.Activation('relu')
self.conv1 = tf.keras.layers.Dense(self.hparams.filters[0]*9,
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm2 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Dense(self.hparams.filters[1]*9,
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm3 = tf.keras.layers.BatchNormalization()
self.resblocks = []
for i in range(self.hparams.num_res_net_blocks):
self.resblocks.append(FFResnetBlock(self.hparams.filters[2], self.hparams.kernel_size[2]))
self.conv4 = tf.keras.layers.Dense(self.hparams.filters[3]*9,
activation=None)
self.batch_norm4 = tf.keras.layers.BatchNormalization()
self.dense = tf.keras.layers.Dense(self.hparams.hidden_dim, activation='relu')
self.dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
self.project = tf.keras.layers.Dense(self.hparams.output_dim, activation=None)
def call(self, inputs, padding_symbol=None, training=None, **kwargs):
x = self.flat(inputs, **kwargs)
x = self.batch_norm1(x, training=training, **kwargs)
x = self.conv1(x, training=training, **kwargs)
x = self.batch_norm2(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.conv2(x, training=training, **kwargs)
x = self.batch_norm3(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
for i in range(self.hparams.num_res_net_blocks):
x = self.resblocks[i](x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
x = self.conv4(x, training=training, **kwargs)
x = self.batch_norm4(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.dense(x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
outputs = self.project(x, training=training, **kwargs)
return outputs
def detailed_call(self, inputs, padding_symbol=None, training=None, **kwargs):
self.layer_activations = []
x = self.flat(inputs, **kwargs)
x = self.batch_norm1(x, training=training, **kwargs)
x = self.conv1(x, training=training, **kwargs)
x = self.batch_norm2(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.conv2(x, training=training, **kwargs)
x = self.batch_norm3(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
for i in range(self.hparams.num_res_net_blocks):
x = self.resblocks[i](x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.conv4(x, training=training, **kwargs)
x = self.batch_norm4(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.dense(x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
    pnltimt = x  # penultimate representation (features just before the final projection)
outputs = self.project(x, training=training, **kwargs)
return outputs, pnltimt, self.layer_activations | 6,118 | 39.256579 | 96 | py |
Reflect | Reflect-master/tf2_models/keras_callbacks.py | import tensorflow as tf
from tf2_models.utils import log_summary
class CheckpointCallback(tf.keras.callbacks.Callback):
def __init__(self, manager, ckpt):
super(CheckpointCallback, self).__init__()
self.manager = manager
self.ckpt = ckpt
def on_epoch_end(self, epoch, logs=None):
self.ckpt.step.assign_add(1)
save_path = self.manager.save()
tf.print("Epoch %d: " %epoch)
tf.print("Saved checkpoint for:", save_path)
class SummaryCallback(tf.keras.callbacks.Callback):
  def __init__(self, summary_writer):
    super(SummaryCallback, self).__init__()
    self.summary_writer = summary_writer
def on_train_batch_end(self, batch, logs=None):
if (self.model.optimizer.iterations % 200) == 0:
print(logs)
if 'loss' in logs.keys():
log_summary(log_name='learning_rate', log_value=self.model.optimizer.learning_rate( self.model.optimizer.iterations), summary_scope='train')
log_summary(log_name='fine_total_loss', log_value=logs['loss'], summary_scope='train')
if 'masked_sequence_loss' in logs.keys():
log_summary(log_name='fine_lm_loss', log_value=logs['masked_sequence_loss'], summary_scope='train')
if 'sequence_loss' in logs.keys():
log_summary(log_name='fine_lm_loss', log_value=logs['sequence_loss'], summary_scope='train')
def on_epoch_end(self, epoch, logs=None):
# Log summary for test and train
if 'masked_sequence_loss' in logs.keys():
      log_summary(log_name='perplexity', log_value=tf.exp(logs['masked_sequence_loss']), summary_scope='train')
log_summary(log_name='perplexity', log_value=tf.exp(logs['val_masked_sequence_loss']), summary_scope='valid')
for key in logs.keys():
if 'val' in key:
log_summary(log_name=key, log_value=logs[key], summary_scope='valid')
else:
log_summary(log_name=key, log_value=logs[key], summary_scope='train')
| 1,859 | 38.574468 | 148 | py |
Reflect | Reflect-master/tf2_models/metrics.py | import tensorflow as tf
@tf.function(experimental_relax_shapes=True)
def distill_loss(y_true, y_pred, tmp):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.float32)
scale_factor = 1.0 / (tmp*tmp)
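  # (assumed rationale, cf. Hinton et al. 2015) the 1/T^2 factor compensates for the
  # gradient shrinkage caused by dividing the logits by T, keeping the distillation
  # term on roughly the same scale as a hard-label loss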
return tf.reduce_mean(tf.compat.v2.nn.softmax_cross_entropy_with_logits(logits=y_pred / tmp,
labels=y_true,
name='loss')) * scale_factor
@tf.function(experimental_relax_shapes=True)
def sequence_distill_loss(y_true, y_pred, padding_symbol, tmp):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.float32)
sequence_mask = tf.cast(y_true[..., padding_symbol] != 1.0, dtype=tf.float32)
sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask)
scale_factor = 1.0 / (tmp * tmp)
return tf.reduce_sum(tf.compat.v2.nn.softmax_cross_entropy_with_logits(logits=y_pred / tmp,
labels=y_true,
name='loss') * sequence_mask) * scale_factor
@tf.function(experimental_relax_shapes=True)
def masked_sequence_loss(y_true, y_pred, padding_symbol=0):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32)
# [batch_size, 1]
sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask, axis=-1)[...,None]
return tf.reduce_mean(tf.reduce_sum(tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss') * sequence_mask, axis=-1))
@tf.function(experimental_relax_shapes=True)
def batch_masked_sequence_loss(y_true, y_pred, padding_symbol=0):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32)
# [batch_size, 1]
sequence_mask = sequence_mask
return tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss'), sequence_mask
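# Illustrative sketch (not part of the original file): padded targets (symbol 0)
# are excluded from the per-sentence average computed by masked_sequence_loss.
# The toy shapes below are assumptions for the example only.
def _masked_sequence_loss_example():
  logits = tf.random.uniform((2, 4, 10))                          # [batch, time, vocab]
  targets = tf.constant([[3, 5, 0, 0], [7, 2, 2, 1]], tf.int64)   # 0 = padding
  return masked_sequence_loss(targets, logits)                    # scalar over non-pad tokens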
@tf.function(experimental_relax_shapes=True)
def masked_perplexity(y_true, y_pred, padding_symbol=0):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32)
# [batch_size, 1]
sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask, axis=-1)[...,None]
return tf.reduce_mean(tf.exp(tf.reduce_sum(tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss') * sequence_mask, axis=-1)))
@tf.function(experimental_relax_shapes=True)
def masked_batch_perplexity(y_true, y_pred, padding_symbol=0):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32)
# [batch_size, 1]
sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask)
return tf.exp(tf.reduce_sum(sequence_mask * tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss')))
#@tf.function(experimental_relax_shapes=True)
def classification_loss(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = tf.squeeze(y_true, axis=-1)
y_true = tf.cast(y_true, dtype=tf.int64)
return tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss')
@tf.function(experimental_relax_shapes=True)
def accuracy(targets, logits, padding_symbol=0):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
sequence_mask = tf.cast(targets != padding_symbol, dtype=tf.float32)
return accuracy_topk(targets, logits, sequence_mask, topk=tf.constant(1))
@tf.function(experimental_relax_shapes=True)
def unmasked_accuracy(targets, logits, ):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
return unmasked_accuracy_topk(targets, logits, topk=tf.constant(1))
@tf.function(experimental_relax_shapes=True)
def accuracy_top2(targets, logits, padding_symbol=0):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
sequence_mask = tf.cast(targets != padding_symbol, dtype=tf.float32)
return accuracy_topk(targets, logits, sequence_mask, topk=tf.constant(2))
@tf.function(experimental_relax_shapes=True)
def unmasked_accuracy_top2(targets, logits, ):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
return unmasked_accuracy_topk(targets, logits, topk=tf.constant(2))
@tf.function(experimental_relax_shapes=True)
def accuracy_top5(targets, logits, padding_symbol=0):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
sequence_mask = tf.cast(targets != padding_symbol, dtype=tf.float32)
return accuracy_topk(targets, logits, sequence_mask, topk=tf.constant(5))
@tf.function(experimental_relax_shapes=True)
def unmasked_accuracy_top5(targets, logits, ):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
return unmasked_accuracy_topk(targets, logits, topk=tf.constant(5))
@tf.function(experimental_relax_shapes=True)
def accuracy_topk(targets, logits, sequence_mask, topk):
orig_shape = tf.shape(logits)
last_dim = orig_shape[-1]
logits = tf.reshape(logits, (-1,last_dim))
targets = tf.reshape(targets, (-1,1))
sequence_mask = tf.cast(tf.reshape(sequence_mask, (-1,1)), tf.float32)
unmasked_accuracies = tf.keras.metrics.sparse_top_k_categorical_accuracy(y_true=targets,
y_pred=logits,
k=topk)
normalizing_factor = sequence_mask / tf.reduce_sum(sequence_mask)
normalizing_factor = tf.squeeze(normalizing_factor)
return tf.reduce_sum(tf.multiply(normalizing_factor, unmasked_accuracies))
@tf.function(experimental_relax_shapes=True)
def unmasked_accuracy_topk(targets, logits, topk):
orig_shape = tf.shape(logits)
last_dim = orig_shape[-1]
logits = tf.reshape(logits, (-1,last_dim))
targets = tf.reshape(targets, (-1,1))
unmasked_accuracies = tf.keras.metrics.sparse_top_k_categorical_accuracy(y_true=targets,
y_pred=logits,
k=topk)
return tf.reduce_mean(unmasked_accuracies)
class MaskedSequenceLoss(tf.keras.losses.Loss):
def __init__(self, padding_symbol=0, num_replicas_in_sync=1,
**kwargs):
super(MaskedSequenceLoss, self).__init__(reduction=tf.keras.losses.Reduction.SUM, **kwargs)
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.name = "batch_masked_sequence_loss"
self.num_replicas_in_sync = num_replicas_in_sync
def call(self, y_true, y_pred, sample_weight=None):
entropies, mask = batch_masked_sequence_loss(y_true=y_true, y_pred=y_pred, padding_symbol=self.padding_symbol)
if sample_weight is not None:
mask = sample_weight
norm_factor = mask / tf.reduce_sum(mask)
return tf.reduce_sum(entropies * norm_factor) / self.num_replicas_in_sync
class MaskedSequenceMetric(tf.keras.losses.Loss):
def __init__(self, padding_symbol=0,
**kwargs):
super(MaskedSequenceMetric, self).__init__(reduction=tf.keras.losses.Reduction.NONE, **kwargs)
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.name = "batch_masked_sequence_loss"
def call(self, y_true, y_pred, sample_weight=None):
entropies, mask = batch_masked_sequence_loss(y_true=y_true, y_pred=y_pred, padding_symbol=self.padding_symbol)
if sample_weight is not None:
mask = sample_weight
norm_factor = mask / tf.reduce_sum(mask)
return tf.reduce_sum(entropies * norm_factor)
class ClassificationLoss(tf.keras.losses.Loss):
def __init__(self, global_batch_size, padding_symbol=tf.constant(0),
**kwargs):
super(ClassificationLoss, self).__init__(reduction=tf.keras.losses.Reduction.SUM, **kwargs)
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.name = "classification_loss"
self.global_batch_size = tf.cast(global_batch_size, dtype=tf.float32)
def call(self, y_true, y_pred):
return classification_loss(y_true=y_true, y_pred=y_pred) / self.global_batch_size
class ClassificationLossMetric(tf.keras.losses.Loss):
def __init__(self, global_batch_size, padding_symbol=0,
**kwargs):
super(ClassificationLossMetric, self).__init__(reduction=tf.keras.losses.Reduction.NONE, **kwargs)
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.name = "classification_loss"
self.global_batch_size = global_batch_size
def call(self, y_true, y_pred):
return tf.reduce_mean(classification_loss(y_true=y_true, y_pred=y_pred), axis=0)
class AccuracyTopk(tf.keras.losses.Loss):
def __init__(self, global_batch_size, padding_symbol=0, topk=1,
**kwargs):
super(AccuracyTopk, self).__init__(reduction=tf.keras.losses.Reduction.NONE, **kwargs)
self.name = '-'.join(['accuracy','top', str(topk)])
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.global_batch_size = global_batch_size
self.topk = tf.constant(topk)
def call(self, y_true, y_pred):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != self.padding_symbol, dtype=tf.float32)
return accuracy_topk(targets=y_true, logits=y_pred, sequence_mask=sequence_mask, topk=self.topk)
if __name__ == '__main__':
import numpy as np
a = np.asarray([[[1,1.5,2,0], [4,3,0,0]],
[[1,1.5,2,0], [4,3,0,0]]], dtype=np.float32)
a_mask = [[1, 1],[1 , 0]]
print(a_mask)
b = np.asarray([[0, 0],[1, 1]], dtype=np.int64)
print(accuracy_topk(logits=a,targets=b,sequence_mask=a_mask,topk=1)) | 10,276 | 46.578704 | 117 | py |
Reflect | Reflect-master/tf2_models/__init__.py | 0 | 0 | 0 | py |
|
Reflect | Reflect-master/tf2_models/trainer.py | import tensorflow as tf
import os
from tf2_models.keras_callbacks import CheckpointCallback, SummaryCallback
from tf2_models.train_utils import RectifiedAdam, ExponentialDecayWithWarmpUp
OPTIMIZER_DIC = {'adam': tf.keras.optimizers.Adam,
'radam': RectifiedAdam,
}
class Trainer(object):
def __init__(self, hparams, strategy, model, task, train_params, log_dir, ckpt_dir):
self.hparams = hparams
self.model = model
self.task = task
self.train_params = train_params
self.strategy = strategy
lr_schedule = self.get_lr_schedule()
self.optimizer = OPTIMIZER_DIC[self.train_params.optimizer](learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0)
self.ckpt = tf.train.Checkpoint(step=tf.Variable(1, name='checkpoint_step'), optimizer=self.optimizer, net=self.model)
self.manager = tf.train.CheckpointManager(self.ckpt, ckpt_dir,
keep_checkpoint_every_n_hours=self.hparams.keep_checkpoint_every_n_hours,
max_to_keep=2)
with self.strategy.scope():
x, y = iter(self.task.valid_dataset).next()
model(x)
model.summary()
model.compile(
optimizer=self.optimizer,
loss=self.task.get_loss_fn(),
metrics=self.task.metrics())#[self.task.get_loss_fn()])
#tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),)
summary_dir = os.path.join(log_dir, 'summaries')
tf.io.gfile.makedirs(log_dir)
self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(summary_dir, 'train'))
tf.compat.v2.summary.experimental.set_step(self.optimizer.iterations)
ckpt_callback = CheckpointCallback(manager=self.manager, ckpt=self.ckpt)
summary_callback = SummaryCallback(summary_writer=self.summary_writer)
self.callbacks = [ckpt_callback, summary_callback]
def get_lr_schedule(self):
if 'crs' in self.train_params.schedule:
initial_learning_rate = self.train_params.learning_rate
lr_schedule = (
tf.keras.experimental.CosineDecayRestarts(
initial_learning_rate,
self.train_params.decay_steps,
t_mul=2.0,
m_mul=0.9,
alpha=0.001,
))
elif self.train_params.optimizer == 'radam':
initial_learning_rate = self.train_params.learning_rate
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=initial_learning_rate,
decay_steps=self.train_params.decay_steps,
hold_base_rate_steps=self.train_params.hold_base_rate_steps,
decay_rate=0.96,
warmup_steps=0.0)
else:
initial_learning_rate = self.train_params.learning_rate
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=initial_learning_rate,
decay_steps=self.train_params.decay_steps,
decay_rate=0.96,
hold_base_rate_steps=self.train_params.hold_base_rate_steps,
warmup_steps=self.train_params.warmup_steps)
return lr_schedule
def restore(self):
with self.strategy.scope():
self.ckpt.restore(self.manager.latest_checkpoint)
if self.manager.latest_checkpoint:
print("Restored from {}".format(self.manager.latest_checkpoint))
else:
print("Initializing from scratch.")
def train(self):
with self.strategy.scope():
with self.summary_writer.as_default():
print("initial learning rate:", self.model.optimizer.learning_rate(self.model.optimizer.iterations))
self.model.fit(self.task.train_dataset,
epochs=self.train_params.num_train_epochs,
steps_per_epoch=self.task.n_train_batches,
validation_steps=self.task.n_valid_batches,
callbacks=self.callbacks,
validation_data=self.task.valid_dataset,
verbose=2
)
| 3,931 | 39.536082 | 122 | py |
Reflect | Reflect-master/tfds_data/__init__.py | 0 | 0 | 0 | py |
|
Reflect | Reflect-master/tfds_data/tal_agreement.py | from collections import Counter
import tensorflow as tf
import tensorflow_datasets as tfds
import os
import numpy as np
from tensorflow_datasets.core.features.text import Tokenizer
from tensorflow_datasets.core.features.text.text_encoder import write_lines_to_file, read_lines_from_file
from prep_data.build_dictionary import build_and_save_dic
from util import text_util, constants
from util.text_util import deps_from_tsv, deps_to_tsv
import string
class SVAgreement(tfds.core.GeneratorBasedBuilder):
""" This is the dataset for evaluating the ability of language models to learn syntax.
Paper:
Assessing the Ability of LSTMs to Learn Syntax-Sensitive Dependencies
Tal Linzen, Emmanuel Dupoux, Yoav Goldberg
"""
VERSION = tfds.core.Version('0.1.0')
CLASS_TO_CODE = {'VBZ': 0, 'VBP': 1}
CODE_TO_CLASS = {x: y for y, x in CLASS_TO_CODE.items()}
def __init__(self, **kwargs):
super(SVAgreement, self).__init__(**kwargs)
def _info(self):
self.text_encoder_config = tfds.features.text.TextEncoderConfig(
encoder_cls=tfds.features.text.SubwordTextEncoder,
vocab_size=2 ** 13)
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=("This is the dataset for subject verb agreement "
"to assess the ability of language models to learn syntax"),
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
"sentence": tfds.features.Text(
encoder_config=self.text_encoder_config),
# Here, labels can be of 5 distinct values.
"verb_class": tfds.features.ClassLabel(names=["VBZ", "VBP"]),
"verb_position": tf.int32,
"n_intervening": tf.int32,
"n_diff_intervening": tf.int32,
"distance": tf.int32,
"verb": tfds.features.Text()
}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=("sentence", "verb_class"),
# Homepage of the dataset for documentation
urls=["https://github.com/TalLinzen/rnn_agreement"],
# Bibtex citation for the dataset
citation=r"""@article{my-awesome-dataset-2020,
author = {Linzen, Tal; Dupoux,Emmanuel; Goldberg, Yoav},"}""",
)
def _vocab_text_gen(self, input_file):
for _, ex in self._generate_examples(input_file):
yield ex["sentence"]
def _split_generators(self, dl_manager):
# Downloads the data and defines the splits
# dl_manager is a tfds.download.DownloadManager that can be used to
# download and extract URLs
extracted_path = dl_manager.download_and_extract(
'http://tallinzen.net/media/rnn_agreement/agr_50_mostcommon_10K.tsv.gz')
def make_splits(extracted_path, data_dir, prop_train=0.1, prop_valid=0.01):
# for reproducibility
np.random.seed(42)
print('| read in the data')
data = deps_from_tsv(extracted_path)
print('| shuffling')
np.random.shuffle(data)
n_train = int(len(data) * prop_train)
n_valid = int(len(data) * prop_valid)
train = data[:n_train]
valid = data[n_train: n_train + n_valid]
test = data[n_train + n_valid:]
print('| splitting')
deps_to_tsv(train, os.path.join(data_dir, "train.tsv"))
deps_to_tsv(valid, os.path.join(data_dir, "valid.tsv"))
deps_to_tsv(test, os.path.join(data_dir, "test.tsv"))
print('| done!')
make_splits(extracted_path,self.data_dir)
# Generate vocabulary from training data if SubwordTextEncoder configured
self.info.features["sentence"].maybe_build_from_corpus(
self._vocab_text_gen(os.path.join(self.data_dir, "train.tsv")))
# Specify the splits
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"input_file_path": os.path.join(self.data_dir, "train.tsv"),
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"input_file_path": os.path.join(self.data_dir, "valid.tsv"),
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"input_file_path": os.path.join(self._data_dir, "test.tsv"),
},
),
]
def _generate_examples(self, input_file_path):
""" Yields examples from the dataset
:param input_file_path:
:return: example
"""
# Read the input data out of the source files
data = deps_from_tsv(input_file_path)
# And yield examples as feature dictionaries
example_id = 0
for example in data:
example_id += 1
yield example_id, {
"sentence": example['sentence'],
"verb_class": example['verb_pos'],
"verb_position": int(example['verb_index']) - 1,
"n_intervening": example['n_intervening'],
"n_diff_intervening": example['n_diff_intervening'],
"distance": example['distance'],
"verb": example['verb']
}
def sentence_encoder(self):
return self.info.features["sentence"].encoder
def vocab_size(self):
"""Retrieves the dictionary mapping word indices back to words.
Arguments:
path: where to cache the data (relative to `~/.keras/dataset`).
Returns:
The word index dictionary.
"""
return self.info.features["sentence"].encoder.vocab_size
class WordSvAgreement(SVAgreement):
""" This is the dataset for evaluating the ability of language models to learn syntax.
Paper:
Assessing the Ability of LSTMs to Learn Syntax-Sensitive Dependencies
Tal Linzen, Emmanuel Dupoux, Yoav Goldberg
"""
VERSION = tfds.core.Version('0.1.0')
CLASS_TO_CODE = {'VBZ': 0, 'VBP': 1}
CODE_TO_CLASS = {x: y for y, x in CLASS_TO_CODE.items()}
VOCAB_DIR = 'tal_agreement/vocab'
def __init__(self, data_dir, **kwargs):
self.vocab_dir = os.path.join(data_dir, self.VOCAB_DIR)
super(WordSvAgreement, self).__init__(data_dir=data_dir, **kwargs)
def _info(self):
vocab = list(np.load(self.vocab_dir, allow_pickle=True).item().keys())
print("Vocab len: ", len(vocab))
self.text_encoder_config = tfds.features.text.TextEncoderConfig(
encoder=tfds.features.text.TokenTextEncoder(vocab_list=vocab,
oov_token=constants.unk,
lowercase=False, tokenizer=tfds.features.text.Tokenizer(
alphanum_only=True,
reserved_tokens=[a for a in string.punctuation if a not in ['<', '>']] + constants.all
)))
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=("This is the dataset for subject verb agreement "
"to assess the ability of language models to learn syntax"),
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
"sentence": tfds.features.Text(
encoder_config=self.text_encoder_config),
# Here, labels can be of 5 distinct values.
"verb_class": tfds.features.ClassLabel(names=["VBZ", "VBP"]),
"verb_position": tf.int32,
"n_intervening": tf.int32,
"n_diff_intervening": tf.int32,
"distance": tf.int32,
"verb": tfds.features.Text()
}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=("sentence", "verb_class"),
# Homepage of the dataset for documentation
homepage="https://github.com/TalLinzen/rnn_agreement",
# Bibtex citation for the dataset
citation=r"""@article{my-awesome-dataset-2020,
author = {Linzen, Tal; Dupoux,Emmanuel; Goldberg, Yoav},"}""",
)
if __name__ == '__main__':
databuilder = WordSvAgreement(data_dir='data')
databuilder.download_and_prepare(download_dir='tmp/',
download_config=tfds.download.DownloadConfig(register_checksums=True))
dataset = databuilder.as_dataset(split="validation", batch_size=1000)
dataset = tfds.as_numpy(dataset)
for batch in dataset:
print("encoded_sentence:", batch['sentence'])
print("decoded_sentence:", databuilder.sentence_encoder().decode(batch['sentence'][0]))
print("verb class:", batch['verb_class'][0])
print("verb position:",batch['verb_position'][0])
print("distance:",batch['distance'][0])
break
print(databuilder.vocab_size())
| 8,680 | 35.020747 | 106 | py |
Reflect | Reflect-master/tasks/task.py | import tensorflow as tf
from distill.distill_util import get_masked_probs
from distill.repsim_util import rep_loss
from util import constants
class Task(object):
def __init__(self, task_params, num_replicas_in_sync=1, builder_cls=None, name='abstract_task', data_dir='data', output_padding=False):
self.name = name
self.task_params = task_params
self.data_dir = data_dir
self.builder_cls = builder_cls
self.num_replicas_in_sync = num_replicas_in_sync
self.add_cls = True
if builder_cls:
self.databuilder = self.builder_cls(data_dir=self.data_dir)
self.input_padding_symbol = tf.constant(0, dtype=tf.int64) #tf.cast(self.sentence_encoder().encode(constants.pad)[0], dtype=tf.int64)
if output_padding:
self.output_padding_symbol = tf.constant(0, dtype=tf.int64) #tf.cast(self.sentence_encoder().encode(constants.pad)[0], dtype=tf.int64)
else:
self.output_padding_symbol = tf.cast(-1, dtype=tf.int64)
self.setup_datasets()
def sentence_encoder(self):
raise NotImplementedError
@property
def padded_shapes(self):
return ([None],[None])
def vocab_size(self):
raise NotImplementedError
def convert_examples(self, examples):
raise NotImplementedError
def get_probs_fn(self):
return get_masked_probs
def setup_datasets(self):
assert self.databuilder
self.info = self.databuilder.info
self.n_train_batches = int(self.info.splits['train'].num_examples / self.task_params.batch_size)
self.n_valid_batches = int(self.info.splits['validation'].num_examples / self.task_params.batch_size)
self.n_test_batches = int(self.info.splits['test'].num_examples / self.task_params.batch_size)
self.valid_dataset = self.databuilder.as_dataset(split="validation")
self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.valid_dataset.padded_batch(batch_size=self.task_params.batch_size,
padded_shapes=self.padded_shapes,
padding_values=(self.input_padding_symbol,self.output_padding_symbol))
#self.valid_dataset = self.valid_dataset.cache()
self.valid_dataset = self.valid_dataset.repeat()
self.valid_dataset = self.valid_dataset.prefetch(tf.data.experimental.AUTOTUNE)
self.test_dataset = self.databuilder.as_dataset(split="test")
self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.test_dataset = self.test_dataset.padded_batch(batch_size=self.task_params.batch_size,
padded_shapes=self.padded_shapes,
padding_values=(self.input_padding_symbol,self.output_padding_symbol))
self.test_dataset = self.test_dataset.repeat()
self.test_dataset = self.test_dataset.prefetch(tf.data.experimental.AUTOTUNE)
self.train_dataset = self.databuilder.as_dataset(split="train")
self.train_dataset = self.train_dataset.shuffle(10000)
self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.train_dataset = self.train_dataset.padded_batch(batch_size=self.task_params.batch_size,
padded_shapes=self.padded_shapes,
padding_values=(self.input_padding_symbol,self.output_padding_symbol))
#self.train_dataset = self.train_dataset.cache()
self.train_dataset = self.train_dataset.repeat()
self.train_dataset = self.train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
def get_rep_loss(self):
return rep_loss
class RandomGaussianTask(object):
def __init__(self, task_params, builder_cls=None, name='random_gaussian_task', data_dir='data'):
self.name = name
self.output_padding_symbol = 0
self.task_params = task_params
self.data_dir = data_dir
self.builder_cls = builder_cls
if builder_cls:
self.databuilder = self.builder_cls(data_dir=self.data_dir)
self.setup_datasets()
@property
def padded_shapes(self):
return ([None],[None])
def vocab_size(self):
raise NotImplementedError
def convert_examples(self, examples):
raise NotImplementedError
def get_probs_fn(self):
return get_masked_probs
def setup_datasets(self):
assert self.builder_cls
self.info = self.databuilder.info
self.n_train_batches = int(self.info.splits['train'].num_examples / self.task_params.batch_size)
self.n_valid_batches = int(self.info.splits['validation'].num_examples / self.task_params.batch_size)
self.n_test_batches = int(self.info.splits['test'].num_examples / self.task_params.batch_size)
self.valid_dataset = self.databuilder.as_dataset(split="validation")
self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.valid_dataset.padded_batch(batch_size=self.task_params.batch_size, padded_shapes=self.padded_shapes)
#self.valid_dataset = self.valid_dataset.cache()
self.valid_dataset = self.valid_dataset.repeat()
self.valid_dataset = self.valid_dataset.prefetch(tf.data.experimental.AUTOTUNE)
self.test_dataset = self.databuilder.as_dataset(split="test")
self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.test_dataset = self.test_dataset.padded_batch(batch_size=self.task_params.batch_size,
padded_shapes=self.padded_shapes)
self.test_dataset = self.test_dataset.repeat()
self.test_dataset = self.test_dataset.prefetch(tf.data.experimental.AUTOTUNE)
self.train_dataset = self.databuilder.as_dataset(split="train")
self.train_dataset = self.train_dataset.shuffle(10000)
self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x), num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.train_dataset = self.train_dataset.padded_batch(batch_size=self.task_params.batch_size, padded_shapes=self.padded_shapes)
#self.train_dataset = self.train_dataset.cache()
self.train_dataset = self.train_dataset.repeat()
self.train_dataset = self.train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
| 6,704 | 47.586957 | 142 | py |
Reflect | Reflect-master/tasks/__init__.py | 0 | 0 | 0 | py |
|
Reflect | Reflect-master/tasks/sv_agreement.py | import functools
from distill.distill_util import DistillLoss, get_probs, SequenceDistillLoss, get_topk_masked_probs, get_masked_probs
from tasks.task import Task
import tensorflow as tf
from tf2_models import metrics
from tf2_models.metrics import masked_batch_perplexity, masked_perplexity, \
MaskedSequenceLoss, ClassificationLoss
from tfds_data.tal_agreement import WordSvAgreement, SVAgreement
from util import constants
class SvAgreementLM(Task):
def __init__(self, task_params, name='sv_agreement_lm', data_dir='data', builder_cls=SVAgreement):
super(SvAgreementLM, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=builder_cls,
output_padding=True)
@tf.function
def convert_examples(self, examples):
sentences = examples['sentence']
s_shape = tf.shape(sentences)
#batch_size, length = s_shape[0], s_shape[1]
bos = self.databuilder.sentence_encoder().encode(constants.bos)
eos = self.databuilder.sentence_encoder().encode(constants.eos)
sentence = tf.concat([bos, sentences, eos], axis=-1)
return sentence[:-1],\
sentence[1:]
def get_loss_fn(self):
return MaskedSequenceLoss(padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), num_replicas_in_sync=self.task_params.num_replicas_in_sync)
def vocab_size(self):
return self.databuilder.vocab_size()
def output_size(self):
return self.vocab_size()
def sentence_encoder(self):
return self.databuilder.sentence_encoder()
def get_distill_loss_fn(self, distill_params):
return SequenceDistillLoss(tmp=distill_params.distill_temp, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64))
def get_probs_fn(self):
return get_masked_probs
def metrics(self):
return [MaskedSequenceLoss(padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)),
functools.update_wrapper(functools.partial(masked_batch_perplexity,
padding_symbol=tf.constant(self.output_padding_symbol,
dtype=tf.int64)),
masked_batch_perplexity),
functools.update_wrapper(functools.partial(masked_perplexity,
padding_symbol=tf.constant(self.output_padding_symbol,
dtype=tf.int64)),
masked_perplexity),
metrics.AccuracyTopk(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), topk=1),
metrics.AccuracyTopk(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), topk=2),
metrics.AccuracyTopk(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), topk=5)
]
class WordSvAgreementLM(SvAgreementLM):
def __init__(self, task_params, name='word_sv_agreement_lm', data_dir='data', builder_cls=WordSvAgreement):
super(WordSvAgreementLM, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=builder_cls)
class WordSvAgreementVP(Task):
def __init__(self, task_params, name='word_sv_agreement_vp', data_dir='data', builder_cls=WordSvAgreement):
super(WordSvAgreementVP, self).__init__(task_params=task_params, name=name, data_dir=data_dir,
builder_cls=builder_cls,
output_padding=False)
@property
def padded_shapes(self):
return ([None],[])
@tf.function
def convert_examples(self, examples):
sentences = examples['sentence']
#bos = self.databuilder.sentence_encoder().encode(constants.bos)
eos = self.databuilder.sentence_encoder().encode(constants.eos)
sentences = tf.concat([sentences, eos], axis=-1)
verb_position = examples['verb_position']
    # The verb itself is also masked
mask = tf.cast(tf.sequence_mask(verb_position,maxlen=tf.shape(sentences)[0]), dtype=tf.int64)
max_length = tf.reduce_max(verb_position + 1)
last_index_mask = tf.eye(tf.shape(sentences)[0], dtype=tf.int64)[verb_position]
last_index_mask = last_index_mask * eos[0]
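    # e.g. (hypothetical token ids) sentence [12, 7, 9, 41, 3, eos] with verb_position 3
    # becomes [12, 7, 9, eos]: everything from the verb onward is zeroed out by the mask,
    # the verb slot itself is overwritten with eos, and the sequence is cut right after it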
return (sentences * mask + last_index_mask)[:max_length], \
examples['verb_class']
def vocab_size(self):
return self.databuilder.vocab_size()
def output_size(self):
return 2
def get_loss_fn(self):
return ClassificationLoss(global_batch_size=tf.constant(self.task_params.batch_size), padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64))
def get_distill_loss_fn(self, distill_params):
return DistillLoss(tmp=tf.constant(distill_params.distill_temp), padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64))
def get_probs_fn(self):
return get_probs
def metrics(self):
return [ClassificationLoss(global_batch_size=tf.constant(self.task_params.batch_size), padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)),
tf.keras.metrics.SparseCategoricalAccuracy()]
def sentence_encoder(self):
return self.databuilder.sentence_encoder() | 5,424 | 44.208333 | 163 | py |
Reflect | Reflect-master/tasks/mnist.py | from distill.distill_util import DistillLoss, get_probs
from tasks.task import Task
import tensorflow as tf
import tensorflow_datasets as tfds
from tf2_models.metrics import ClassificationLoss
from tfds_data.aff_nist import AffNist
class Mnist(Task):
def __init__(self, task_params, name='mnist', data_dir='mnist_data'):
self.databuilder = tfds.builder("mnist")
super(Mnist, self).__init__(task_params=task_params, name=name,
data_dir=data_dir,
builder_cls=None)
def vocab_size(self):
return 28*28
def output_size(self):
return 10
def get_loss_fn(self):
return ClassificationLoss(global_batch_size=self.task_params.batch_size,
padding_symbol=tf.constant(-1, dtype=tf.int64))
def get_distill_loss_fn(self, distill_params):
return DistillLoss(tmp=distill_params.distill_temp)
def get_probs_fn(self):
return get_probs
def metrics(self):
return [ClassificationLoss(global_batch_size=self.task_params.batch_size,
padding_symbol=tf.constant(-1, dtype=tf.int64)),
tf.keras.metrics.SparseCategoricalAccuracy()]
@property
def padded_shapes(self):
# To make sure we are not using this!
raise NotImplementedError
def convert_examples(self, examples):
return tf.cast(examples['image'], dtype=tf.float32)/255, tf.cast(examples['label'], dtype=tf.int32)
def setup_datasets(self):
self.info = self.databuilder.info
self.n_train_batches = int(
self.info.splits['train'].num_examples / self.task_params.batch_size)
self.n_test_batches = int(
self.info.splits['test'].num_examples / self.task_params.batch_size)
self.n_valid_batches = int(
self.info.splits['test'].num_examples / self.task_params.batch_size)
self.databuilder.download_and_prepare(download_dir=self.data_dir)
self.test_dataset = self.databuilder.as_dataset(split="test")
assert isinstance(self.test_dataset, tf.data.Dataset)
self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.test_dataset = self.test_dataset.repeat()
self.test_dataset = self.test_dataset.batch(
batch_size=self.task_params.batch_size)
self.test_dataset = self.test_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
self.train_dataset = self.databuilder.as_dataset(split="train")
assert isinstance(self.train_dataset, tf.data.Dataset)
self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.train_dataset = self.train_dataset.repeat()
self.train_dataset = self.train_dataset.shuffle(1024)
self.train_dataset = self.train_dataset.batch(
batch_size=self.task_params.batch_size)
# self.train_dataset = self.train_dataset.cache()
self.train_dataset = self.train_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.databuilder.as_dataset(split="test")
assert isinstance(self.valid_dataset, tf.data.Dataset)
self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.valid_dataset.repeat()
self.valid_dataset = self.valid_dataset.batch(
batch_size=self.task_params.batch_size)
self.valid_dataset = self.valid_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
class AffNistTask(Task):
def __init__(self, task_params, name='aff_nist',data_dir='data', builder_cls=AffNist):
super(AffNistTask, self).__init__(task_params=task_params, name=name,
data_dir=data_dir,
builder_cls=builder_cls)
def input_shape(self):
"""
To be used when calling model.build(input_shape)
:return:
    #[batch_size, height, width, channels]
"""
return [None, 32, 32, 1]
def vocab_size(self):
return 40*40
def output_size(self):
return 10
def get_loss_fn(self):
return ClassificationLoss(global_batch_size=self.task_params.batch_size,
padding_symbol=tf.constant(-1, dtype=tf.int64))
def get_distill_loss_fn(self, distill_params):
return DistillLoss(tmp=distill_params.distill_temp)
def get_probs_fn(self):
return get_probs
def metrics(self):
return [ClassificationLoss(global_batch_size=self.task_params.batch_size,
padding_symbol=tf.constant(-1, dtype=tf.int64)),
tf.keras.metrics.SparseCategoricalAccuracy()]
@property
def padded_shapes(self):
# To make sure we are not using this!
raise NotImplementedError
def convert_examples(self, examples):
return tf.cast(examples['image'], dtype=tf.float32)/255, tf.cast(examples['label'], dtype=tf.int32)
def setup_datasets(self):
self.info = self.databuilder.info
self.n_train_batches = int(
self.info.splits['train'].num_examples / self.task_params.batch_size)
self.n_test_batches = int(
self.info.splits['test'].num_examples / self.task_params.batch_size)
self.n_valid_batches = int(
self.info.splits['test'].num_examples / self.task_params.batch_size)
self.test_dataset = self.databuilder.as_dataset(split="test")
assert isinstance(self.test_dataset, tf.data.Dataset)
self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.test_dataset = self.test_dataset.repeat()
self.test_dataset = self.test_dataset.batch(
batch_size=self.task_params.batch_size)
self.test_dataset = self.test_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
self.train_dataset = self.databuilder.as_dataset(split="train")
assert isinstance(self.train_dataset, tf.data.Dataset)
self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.train_dataset = self.train_dataset.repeat()
self.train_dataset = self.train_dataset.shuffle(1024)
self.train_dataset = self.train_dataset.batch(
batch_size=self.task_params.batch_size)
# self.train_dataset = self.train_dataset.cache()
self.train_dataset = self.train_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.databuilder.as_dataset(split="test")
assert isinstance(self.valid_dataset, tf.data.Dataset)
self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.valid_dataset.repeat()
self.valid_dataset = self.valid_dataset.batch(
batch_size=self.task_params.batch_size)
self.valid_dataset = self.valid_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
class Svhn(Mnist):
def __init__(self, task_params, name='svhn', data_dir='mnist_data'):
self.databuilder = tfds.builder("svhn_cropped")
super(Mnist, self).__init__(task_params=task_params, name=name,
data_dir=data_dir,
builder_cls=None)
def vocab_size(self):
return 32 * 32
def input_shape(self):
"""
To be used when calling model.build(input_shape)
:return:
    #[batch_size, height, width, channels]
"""
return [None, 32, 32, 1]
class Mnist40(Mnist):
def __init__(self, task_params, name='mnist40', data_dir='mnist_data'):
self.databuilder = tfds.builder("mnist")
super(Mnist, self).__init__(task_params=task_params, name=name,
data_dir=data_dir,
builder_cls=None)
def vocab_size(self):
return 40 * 40
def output_size(self):
return 10
def input_shape(self):
"""
To be used when calling model.build(input_shape)
:return:
    #[batch_size, height, width, channels]
"""
return [None, 32, 32, 1]
def convert_examples(self, examples):
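    # pad the 28x28 MNIST digits with (40 - 28) / 2 = 6 blank pixels on each side,
    # yielding 40x40 inputs (the resolution assumed by the 40x40 models / AffNIST setup)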
pad_length = int((40 - 28) / 2)
return tf.pad(tf.cast(examples['image'], dtype=tf.float32) / 255,
([pad_length, pad_length], [pad_length, pad_length],
[0, 0])), tf.cast(
examples['label'], dtype=tf.int32)
| 8,663 | 37.678571 | 103 | py |
Reflect | Reflect-master/tasks/evaluations/lm_sv_agreement_eval.py | ''' Evaluate word based language models on the subject verb agreement task.
Codes adapted from:
Example Run:
python tasks/evaluations/lm_sv_agreement_eval.py \
--exp_name=lisa_fd4 \
--model_name=lm_gpt2 \
--model_config=very_big_gpt_v10 \
--train_config=adam_slow \
--prefix=offline_pure_distill_2_teacher_lm_lstm_shared_emb_em-512_h-512_d-2_hdrop-0.3_indrop-0.2_0.001_lisa_offlineteacher_v1_student \
--withlr=False \
--chkpt_dir=tf_ckpts \
--logdir=logs
'''
import os
from tasks.sv_agreement import WordSvAgreementLM
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from util import constants
from collections import Counter
from tqdm import tqdm
from tf2_models.metrics import *
import numpy as np
from absl import flags
from absl import app
from util.models import MODELS
from util.text_util import gen_inflect_from_vocab
FLAGS = flags.FLAGS
flags.DEFINE_string('logdir', 'logs', ' log dir path')
flags.DEFINE_string('chkpt_dir', 'chkpt_dir', ' chkpt_dir path')
flags.DEFINE_string('prefix', 'prefix', ' prefix')
flags.DEFINE_string('exp_name', 'tune_withl2_withpunc', 'tune_withl2_withpunc | withl2_batchsumloss_withpunc')
flags.DEFINE_string('model_config', 'very_big_gpt_v10', 'big_gpt_v5 | very_big_gpt_v10| lstm_drop31_v2')
flags.DEFINE_string('model_name', 'lm_gpt2_shared', 'lm_gpt2_shared | lm_gpt1 | lm_lstm_shared_emb')
flags.DEFINE_string('train_config', 'adam_slw', ' adam_slw | radam_fst')
flags.DEFINE_string('split', 'test', ' valid | test | train')
flags.DEFINE_boolean('withlr', True, 'True | False')
hparams = flags.FLAGS
def compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total):
''' Computes and prints accuracy based on hits
:param distance_hits:
:param distance_total:
:param diff_hits:
:param diff_total:
:return: None
'''
dis_acc = np.zeros(16)
dif_acc = np.zeros(5)
total_nominator = 0.0
total_denominator = 0.0
print('Accuracy by distance')
for k in sorted(distance_hits.keys()):
v = distance_hits[k]
acc = v / distance_total[k]
dis_acc[k-1] = acc
print("%d | %.2f" % (k, acc), distance_total[k])
total_nominator += v
total_denominator += distance_total[k]
print("Micro accuracy (distance):", total_nominator / total_denominator)
print("Macro accuracy (distance):", np.mean(dis_acc))
print('Accuracy by intervenings:')
total_nominator = 0.0
total_denominator = 0.0
for k in sorted(diff_hits.keys()):
v = diff_hits[k]
acc = v * 1. / diff_total[k]
print("%d | %.2f" % (k, acc), diff_total[k])
dif_acc[k] = acc
total_nominator += v
total_denominator += diff_total[k]
print("Micro accuracy (intervenings):", total_nominator / total_denominator)
print("Macro accuracy (intervenings):", np.mean(dif_acc))
def evaluate_vp(model, task, split='test'):
''' Computes the accuracy statistics of the given model on the subject verb agreement task.
:param model: the models to be evaluated
:param task:
:return: distance_hits, distance_total, diff_hits, diff_total
'''
verb_infl, noun_infl = gen_inflect_from_vocab('data/tal_agreement/wiki.vocab')
distance_hits = Counter()
distance_total = Counter()
diff_hits = Counter()
diff_total = Counter()
test_data = task.databuilder.as_dataset(split=split, batch_size=1000)
for example in tqdm(test_data):
encoded_sentences = example['sentence']
s_shape = tf.shape(encoded_sentences)
batch_size, length = s_shape[0], s_shape[1]
bos = tf.ones((batch_size, 1), dtype=tf.int64) * task.databuilder.sentence_encoder().encode(constants.bos)
eos = tf.ones((batch_size, 1), dtype=tf.int64) * task.databuilder.sentence_encoder().encode(constants.eos)
encoded_sentences = tf.concat([bos, encoded_sentences, eos], axis=1)
actual_verbs = example['verb']
inflected_verbs = [verb_infl[v.decode("utf-8")] for v in actual_verbs.numpy()]
verb_indexes = example['verb_position']
distances = example['distance'].numpy()
nz = example['n_intervening'].numpy()
n_diffs = example['n_diff_intervening'].numpy()
actual_verb_indexes = [task.databuilder.sentence_encoder().encode(v)[0] for v in actual_verbs.numpy()]
inflected_verb_indexes = [task.databuilder.sentence_encoder().encode(v)[0] for v in inflected_verbs]
scores = model(encoded_sentences)
actual_batch_indexes = [(i, verb_indexes[i], actual_verb_indexes[i]) for i in range(len(verb_indexes))]
actual_scores = tf.compat.v2.gather_nd(scores, actual_batch_indexes)
inflected_batch_indexes = [(i, verb_indexes[i], inflected_verb_indexes[i]) for i in range(len(verb_indexes))]
    inflected_scores = tf.compat.v2.gather_nd(scores, inflected_batch_indexes)
    corrects = actual_scores > inflected_scores
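    # a prediction counts as correct when the LM scores the observed verb form higher than
    # its opposite-number inflection; hits are bucketed below by subject-verb distance and
    # by the number of intervening nouns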
for i, c in enumerate(corrects):
if nz[i] > 4 or distances[i] > 16:
continue
distance_total[distances[i]] += 1
distance_hits[distances[i]] += int(c)
if nz[i] == n_diffs[i]:
n = nz[i]
diff_total[n] += 1
diff_hits[n] += int(c)
return distance_hits, distance_total, diff_hits, diff_total
def main(argv):
task = WordSvAgreementLM(task_params=get_task_params(), data_dir='data')
# Create the Model
model_params = get_model_params(task, hparams.model_name, hparams.model_config)
print("model_params: ", model_params.__dict__)
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
model = MODELS[hparams.model_name](hparams=get_model_params(task, hparams.model_name, hparams.model_config),
cl_token=cl_token)
trainer_params = get_train_params(hparams.train_config)
if len(hparams.prefix) > 0:
hparams.prefix = hparams.prefix + "_"
log_dir = os.path.join(hparams.logdir, task.name,
hparams.prefix+model.model_name + "_" + str(hparams.model_config) + "_" + str(
trainer_params.learning_rate) + "_" + hparams.exp_name)
ckpt_dir = os.path.join(hparams.chkpt_dir, task.name,
hparams.prefix+model.model_name + "_" + str(hparams.model_config) + "_" + ((str(
trainer_params.learning_rate) + "_") if hparams.withlr else '') + hparams.exp_name)
print(ckpt_dir)
trainer = Trainer(task=task,
model=model,
train_params=trainer_params,
log_dir=log_dir,
ckpt_dir=ckpt_dir)
trainer.restore()
distance_hits, distance_total, diff_hits, diff_total = evaluate_vp(trainer.model, trainer.task, hparams.split)
compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
if __name__ == '__main__':
app.run(main)
| 6,755 | 36.955056 | 135 | py |
Reflect | Reflect-master/tasks/evaluations/__init__.py | 0 | 0 | 0 | py |
|
Reflect | Reflect-master/notebooks/notebook_utils.py | import tensorflow as tf
import numpy as np
import os
from tqdm import tqdm
from util import constants
from collections import Counter
from util.models import MODELS
from util.tasks import TASKS
from util.config_util import get_model_params, get_task_params, get_train_params
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns; sns.set()
sns.set_style("whitegrid")
from util import inflect
dependency_fields = ['sentence', 'orig_sentence', 'pos_sentence',
'subj', 'verb', 'subj_pos', 'has_rel', 'has_nsubj',
'verb_pos', 'subj_index', 'verb_index', 'n_intervening',
'last_intervening', 'n_diff_intervening', 'distance',
'max_depth', 'all_nouns', 'nouns_up_to_verb']
def get_model(config, task, hparams, cl_token, **kwargs):
model = MODELS[config['model_name']](hparams=hparams, cl_token=cl_token, **kwargs)
ckpt_dir = os.path.join(config['chkpt_dir'],task.name,
model.model_name+"_"+str(config['model_config'])+"_"+str(config['learning_rate'])+"_"+config['exp_name'])
ckpt = tf.train.Checkpoint(net=model)
manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=None)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored student from {}".format(manager.latest_checkpoint))
else:
print("No checkpoint found {}".format(ckpt_dir))
model.compile(loss=task.get_loss_fn(), metrics=task.metrics())
return model, ckpt
def get_student_model(config, task, hparams, cl_token):
teacher_model = MODELS[config['teacher_model']](hparams=get_model_params(task, config['teacher_model'], config['teacher_config']), cl_token=cl_token)
model = MODELS[config['student_model']](hparams=hparams, cl_token=cl_token)
ckpt_dir = os.path.join(config['chkpt_dir'], task.name,
'_'.join([config['distill_mode'],config['distill_config'],
"teacher", teacher_model.model_name,
config['teacher_config'],
config['teacher_exp_name'],
"student",model.model_name,
str(config['student_config']),
config['student_exp_name']]))
print("student_checkpoint:", ckpt_dir)
ckpt = tf.train.Checkpoint(net=model)
manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=None)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored student from {}".format(manager.latest_checkpoint))
else:
print("No checkpoint found {}".format(ckpt_dir))
model.compile(loss=task.get_loss_fn(), metrics=task.metrics())
return model, ckpt
def get_teacher_model(config, task, hparams, cl_token):
model = MODELS[config['teacher_model']](hparams=hparams, cl_token=cl_token)
ckpt_dir = os.path.join(config['chkpt_dir'], task.name,
'_'.join([model.model_name, config['teacher_config'],config['teacher_exp_name']]))
ckpt = tf.train.Checkpoint(net=model)
manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=None)
  ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored student from {}".format(manager.latest_checkpoint))
else:
print("No checkpoint found {}".format(ckpt_dir))
model.compile(loss=task.get_loss_fn(), metrics=task.metrics())
return model, ckpt
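# Usage sketch for the student/teacher loaders (config values copied from the
# eval scripts below; other model pairs follow the same field names):
#   config = {'student_exp_name': 'gc_f_std124', 'teacher_exp_name': 'gc_o_tchr124',
#             'teacher_model': 'cl_lstm', 'student_model': 'cl_lstm',
#             'teacher_config': 'small_lstm_v4', 'student_config': 'small_lstm_v4',
#             'distill_config': 'pure_dstl_4_crs_slw', 'distill_mode': 'offline',
#             'chkpt_dir': '../tf_ckpts'}
#   std_hparams = get_model_params(task, config['student_model'], config['student_config'])
#   student, _ = get_student_model(config, task, std_hparams, cl_token)
#   tchr_hparams = get_model_params(task, config['teacher_model'], config['teacher_config'])
#   teacher, _ = get_teacher_model(config, task, tchr_hparams, cl_token)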
def gen_inflect_from_vocab(infl_eng, vocab_file, freq_threshold=1000):
vbp = {}
vbz = {}
nn = {}
nns = {}
from_pos = {'NNS': nns, 'NN': nn, 'VBP': vbp, 'VBZ': vbz}
for line in open(vocab_file):
if line.startswith(' '): # empty string token
continue
word, pos, count = line.strip().split()
count = int(count)
if len(word) > 1 and pos in from_pos and count >= freq_threshold:
from_pos[pos][word] = count
verb_infl = {'VBP': 'VBZ', 'VBZ': 'VBP'}
for word, count in vbz.items():
candidate = infl_eng.plural_verb(word)
if candidate in vbp:
verb_infl[candidate] = word
verb_infl[word] = candidate
noun_infl = {'NN': 'NNS', 'NNS': 'NN'}
for word, count in nn.items():
candidate = infl_eng.plural_noun(word)
if candidate in nns:
noun_infl[candidate] = word
noun_infl[word] = candidate
return verb_infl, noun_infl
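# Sketch: building the inflection maps from a vocab file, as done in the eval
# scripts below (assumes a 'wiki.vocab' file with "word pos count" lines):
#   infl_eng = inflect.engine()
#   verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab')
#   # verb_infl maps each VBZ form to its VBP counterpart and vice versa;
#   # noun_infl does the same for NN/NNS.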
def compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total):
  ''' Computes and prints micro and macro accuracy, broken down by
  subject-verb distance and by the number of intervening nouns.
  :param distance_hits: Counter of correct predictions keyed by subject-verb distance
  :param distance_total: Counter of example counts keyed by subject-verb distance
  :param diff_hits: Counter of correct predictions keyed by number of intervening nouns
  :param diff_total: Counter of example counts keyed by number of intervening nouns
  :return: None
  '''
  dis_acc = np.zeros(16)
  dif_acc = np.zeros(5)
  total_numerator = 0.0
  total_denominator = 0.0
  print('Accuracy by distance:')
  for k in sorted(distance_hits.keys()):
    v = distance_hits[k]
    acc = v / distance_total[k]
    dis_acc[k-1] = acc
    print("%d | %.2f" % (k, acc), distance_total[k])
    total_numerator += v
    total_denominator += distance_total[k]
  print("Micro accuracy (distance):", total_numerator / total_denominator)
  print("Macro accuracy (distance):", np.mean(dis_acc))
  print('Accuracy by intervenings:')
  total_numerator = 0.0
  total_denominator = 0.0
  for k in sorted(diff_hits.keys()):
    v = diff_hits[k]
    acc = v * 1. / diff_total[k]
    print("%d | %.2f" % (k, acc), diff_total[k])
    dif_acc[k] = acc
    total_numerator += v
    total_denominator += diff_total[k]
  print("Micro accuracy (intervenings):", total_numerator / total_denominator)
  print("Macro accuracy (intervenings):", np.mean(dif_acc))
def evaluate_vp_cl(model, verb_infl, noun_infl, task, split='test', batch_size=1000, cls=False):
distance_hits = Counter()
distance_total = Counter()
diff_hits = Counter()
diff_total = Counter()
test_data = task.databuilder.as_dataset(split=split, batch_size=batch_size)
e = 0
for examples in test_data:
e += 1
sentences = examples['sentence']
#bos = tf.cast(task.databuilder.sentence_encoder().encode(constants.bos) * tf.ones((sentences.shape[0],1)), dtype=tf.int64)
eos = tf.cast(task.databuilder.sentence_encoder().encode(constants.eos) *tf.ones((sentences.shape[0],1)), dtype=tf.int64)
sentences = tf.concat([sentences, eos], axis=-1)
    verb_position = examples['verb_position']+int(cls)  # +1 when a cls/bos token is prepended.
    # The verb itself is also masked.
mask = tf.cast(tf.sequence_mask(verb_position,maxlen=tf.shape(sentences)[1]), dtype=tf.int64)
max_length = tf.reduce_max(verb_position + 1)
last_index_mask = tf.gather(tf.eye(tf.shape(sentences)[1], dtype=tf.int64),verb_position)
last_index_mask = last_index_mask * eos[0]
inputs = (sentences * mask + last_index_mask)[:,:max_length]
s_shape = tf.shape(inputs)
batch_size, length = s_shape[0], s_shape[1]
verb_classes = examples['verb_class']
actual_verbs = examples['verb']
#inflected_verbs = [verb_infl[v.decode("utf-8")] for v in actual_verbs.numpy()]
distances = examples['distance'].numpy()
nz = examples['n_intervening'].numpy()
n_diffs = examples['n_diff_intervening'].numpy()
actual_verb_indexes = [task.databuilder.sentence_encoder().encode(v)[0] for v in actual_verbs.numpy()]
predictions = model(inputs, training=False)
predictions = np.argmax(predictions, axis=-1)
corrects = predictions == verb_classes
for i, c in enumerate(corrects):
      # Skip examples whose verb maps to these (ignored) token ids.
      if actual_verb_indexes[i] == 10035 or actual_verb_indexes[i] == 2:
continue
if nz[i] > 4 or distances[i] > 16:
continue
distance_total[distances[i]] += 1
distance_hits[distances[i]] += int(c)
if nz[i] == n_diffs[i]:
n = nz[i]
diff_total[n] += 1
diff_hits[n] += int(c)
return distance_hits, distance_total, diff_hits, diff_total
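# Sketch of the full agreement-evaluation pipeline (mirrors the eval scripts;
# assumes `model`, `task`, `verb_infl` and `noun_infl` are already built):
#   distance_hits, distance_total, diff_hits, diff_total = \
#       evaluate_vp_cl(model, verb_infl, noun_infl, task)
#   compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)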
def test_for_calibration(model, task, n_bins=10):
preds = []
correct_class_probs = []
predicted_class_probs = []
pred_logits = []
y_trues = []
batch_count = task.n_valid_batches
for x, y in task.valid_dataset:
logits = model(x)
pred_logits.extend(logits.numpy())
pred = tf.argmax(logits, axis=-1)
prob = task.get_probs_fn()(logits, labels=y, temperature=1)
preds.extend(pred.numpy())
y_trues.extend(y.numpy())
batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int32), dtype=tf.int32)
true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1)
pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1)
correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy())
predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy())
batch_count -= 1
if batch_count == 0:
break
model_accuracy = np.asarray(preds) == np.asarray(y_trues)
return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues
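# Sketch: calibration analysis with these helpers (n_bins=20 follows the eval
# scripts; tensorflow_probability is only needed for the ECE line):
#   acc, pred_probs, true_probs, logits, y_trues = test_for_calibration(model, task, n_bins=20)
#   plot_calibration(acc, pred_probs, true_probs, n_bins=20)
#   ece = tfp.stats.expected_calibration_error(20, logits=logits, labels_true=y_trues)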
def plot_calibration(model_accuracy, predicted_class_probs, correct_class_probs, n_bins=10):
p_confidence_bins = np.zeros(n_bins+1)
n_confidence_bins = np.zeros(n_bins+1)
total_confidence_bins = np.zeros(n_bins+1)
denominator = 100.0 / n_bins
for i in np.arange(len(model_accuracy)):
if model_accuracy[i]:
p_confidence_bins[int(predicted_class_probs[i]*100 / denominator)] += 1.0
else:
n_confidence_bins[int(predicted_class_probs[i]*100 / denominator)] -= 1.0
total_confidence_bins[int(predicted_class_probs[i]*100 / denominator)] += 1
#sns.stripplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5, jitter=True)
#sns.stripplot(model_accuracy,correct_class_probs, color='green', alpha=0.2, jitter=True)
#sns.swarmplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5)
#plt.show()
sns.barplot(x=np.arange(0,n_bins)*denominator,
y=np.arange(0,n_bins)/n_bins,
color='green', alpha=0.2, edgecolor='black')
ax = sns.barplot(x=np.arange(0,n_bins)*denominator,
y=p_confidence_bins[1:]/total_confidence_bins[1:],
color='red', alpha=0.5, edgecolor='black')
x_ticks = np.arange(0,n_bins,2)
x_tick_labels = x_ticks / np.float32(n_bins)
ax.set_xticks(x_ticks)
ax.set_xticklabels(x_tick_labels, fontsize=10)
def expected_calibration_error(teacher_accuracy, teacher_predicted_class_probs):
  raise NotImplementedError | 10,989 | 36.508532 | 153 | py |
Reflect | Reflect-master/notebooks/__init__.py | 0 | 0 | 0 | py |
|
Reflect | Reflect-master/notebooks/calibration_util.py | import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns; sns.set()
sns.set_style("whitegrid")
from tqdm import tqdm
def test_for_calibration(model, task, n_bins=10):
preds = []
correct_class_probs = []
predicted_class_probs = []
pred_logits = []
y_trues = []
batch_count = task.n_valid_batches
for x, y in task.valid_dataset:
logits = model(x)
pred_logits.extend(logits.numpy())
pred = tf.argmax(logits, axis=-1)
prob = task.get_probs_fn()(logits, labels=y, temperature=1)
preds.extend(pred.numpy())
y_trues.extend(y.numpy())
batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int64), dtype=tf.int64)
true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1)
pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int64)], axis=1)
correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy())
predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy())
batch_count -= 1
if batch_count == 0:
break
model_accuracy = np.asarray(preds) == np.asarray(y_trues)
return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues
def plot_calibration(model_accuracy, predicted_class_probs, correct_class_probs, n_bins=10):
p_confidence_bins = np.zeros(n_bins)
n_confidence_bins = np.zeros(n_bins)
total_confidence_bins = np.zeros(n_bins)
denominator = 100.0 / n_bins
for i in np.arange(len(model_accuracy)):
if model_accuracy[i]:
p_confidence_bins[min(int(predicted_class_probs[i]*100 // denominator),n_bins-1)] += 1.0
else:
n_confidence_bins[min(int(predicted_class_probs[i]*100 // denominator),n_bins-1)] -= 1.0
total_confidence_bins[min(int(predicted_class_probs[i]*100 // denominator),n_bins-1)] += 1
#sns.stripplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5, jitter=True)
#sns.stripplot(model_accuracy,correct_class_probs, color='green', alpha=0.2, jitter=True)
#sns.swarmplot(model_accuracy,predicted_class_probs, color='blue', alpha=0.5)
#plt.show()
sns.barplot(x=np.arange(0,n_bins)*denominator,
y=np.arange(0,n_bins)/n_bins,
color='green', alpha=0.2, edgecolor='black')
ax = sns.barplot(x=np.arange(0,n_bins)*denominator,
y=p_confidence_bins/total_confidence_bins,
color='red', alpha=0.5, edgecolor='black')
x_ticks = np.arange(0,n_bins,2)
x_tick_labels = x_ticks / np.float32(n_bins)
ax.set_xticks(x_ticks)
ax.set_xticklabels(x_tick_labels, fontsize=10)
return p_confidence_bins,n_confidence_bins,total_confidence_bins
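# Sketch (assumes `model` and `task` are loaded as in the notebooks): the three
# returned arrays are the per-bin correct counts, negated incorrect counts, and
# totals used for the reliability bars above.
#   acc, pred_probs, true_probs, _, _ = test_for_calibration(model, task)
#   pos_bins, neg_bins, totals = plot_calibration(acc, pred_probs, true_probs, n_bins=10)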
def expected_calibration_error(teacher_accuracy, teacher_predicted_class_probs):
  raise NotImplementedError | 3,258 | 38.26506 | 100 | py |
Reflect | Reflect-master/notebooks/viz/__init__.py | 0 | 0 | 0 | py |
|
Reflect | Reflect-master/notebooks/eval_scripts/eval_vp.py | import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
from notebook_utils import *
import pandas as pd
import seaborn as sns; sns.set()
from collections import Counter
from tqdm import tqdm
log_dir = "../logs"
chkpt_dir = "../tf_ckpts"
task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(),data_dir='../data')
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
models = {}
labels = []
config={'student_exp_name':'gc_f_std124',
'teacher_exp_name':'gc_o_tchr124',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, _ = get_teacher_model(config, task, tchr_hparams, cl_token)
models['lstm_124'] = teacher_model
labels.append('lstm_124')
config={'student_exp_name':'gc_f_std125',
'teacher_exp_name':'gc_o_tchr125',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, _ = get_teacher_model(config, task, tchr_hparams, cl_token)
models['lstm_125'] = teacher_model
labels.append('lstm_125')
config={'student_exp_name':'gc_f_std130',
'teacher_exp_name':'gc_o_tchr130',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, _ = get_teacher_model(config, task, tchr_hparams, cl_token)
models['lstm_130'] = teacher_model
labels.append('lstm_130')
config={'student_exp_name':'gc_f_std131',
'teacher_exp_name':'gc_o_tchr131',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, _ = get_teacher_model(config, task, tchr_hparams, cl_token)
models['lstm_131'] = teacher_model
labels.append('lstm_131')
keys = labels
import tensorflow_probability as tfp
def test_for_calibration(model, task, n_bins=10):
preds = []
correct_class_probs = []
predicted_class_probs = []
pred_logits = []
y_trues = []
batch_count = task.n_valid_batches
for x, y in task.valid_dataset:
y = tf.cast(y, tf.int32)
logits = model(x)
pred_logits.extend(logits.numpy())
pred = tf.argmax(logits, axis=-1)
prob = task.get_probs_fn()(logits, labels=y, temperature=1)
preds.extend(pred.numpy())
y_trues.extend(y.numpy())
batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int32), dtype=tf.int32)
true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1)
pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1)
correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy())
predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy())
batch_count -= 1
if batch_count == 0:
break
model_accuracy = np.asarray(preds) == np.asarray(y_trues)
return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues
# for key in keys:
# model = models[key]
# print('##################################')
# train = model.evaluate(task.train_dataset, steps=task.n_train_batches)
# valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches)
# test = model.evaluate(task.test_dataset, steps=task.n_test_batches)
# print(key)
# print(train[0],'\t',train[1],'\t',train[2],'\t', valid[0],'\t', valid[1],'\t', valid[2], '\t', test[0], '\t', test[1], '\t', test[2])
for key in keys:
model = models[key]
print('##################################')
model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20)
model_ece = tfp.stats.expected_calibration_error(
1000000,
logits=model_logits,
labels_true=model_trues,
)
print(model_ece.numpy())
| 5,193 | 28.68 | 139 | py |
Reflect | Reflect-master/notebooks/eval_scripts/eval_vp-bert.py | import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
from notebook_utils import *
import pandas as pd
import seaborn as sns; sns.set()
from collections import Counter
from tqdm import tqdm
import logging
tf.get_logger().setLevel(logging.ERROR)
log_dir = "../logs"
chkpt_dir = "../tf_ckpts"
task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(batch_size=512),data_dir='../data')
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
students = []
models = []
labels = []
#Bert to LSTM
config={'student_exp_name':'gc_f_std9303',
'teacher_exp_name':'gc_o_tchr8323',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp9',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('bert2lstm_1')
config={'student_exp_name':'gc_f_std9304',
'teacher_exp_name':'gc_o_tchr8324',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp9',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('bert2lstm_2')
config={'student_exp_name':'gc_f_std9301',
'teacher_exp_name':'gc_o_tchr9301',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp9',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('bert2lstm_3')
config={'student_exp_name':'gc_f_std9302',
'teacher_exp_name':'gc_o_tchr9302',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp9',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('bert2lstm_4')
config={'student_exp_name':'gc_f_std8331',
'teacher_exp_name':'gc_o_tchr8321',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_gpt2_shared',
'teacher_config':'small_gpt_v9',
'student_config':'small_ugpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2ugpt_1')
config={'student_exp_name':'gc_f_std8332',
'teacher_exp_name':'gc_o_tchr8322',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_gpt2_shared',
'teacher_config':'small_gpt_v9',
'student_config':'small_ugpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2ugpt_2')
config={'student_exp_name':'gc_f_std8333',
'teacher_exp_name':'gc_o_tchr8323',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_gpt2_shared',
'teacher_config':'small_gpt_v9',
'student_config':'small_ugpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2ugpt_3')
config={'student_exp_name':'gc_f_std8334',
'teacher_exp_name':'gc_o_tchr8324',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_gpt2_shared',
'teacher_config':'small_gpt_v9',
'student_config':'small_ugpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2ugpt_4')
config={'student_exp_name':'gc_f_std8311',
'teacher_exp_name':'gc_o_tchr8311',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_bert',
'teacher_config':'small_gpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2bert_1')
config={'student_exp_name':'gc_f_std8312',
'teacher_exp_name':'gc_o_tchr8322',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_bert',
'teacher_config':'small_gpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2bert_2')
config={'student_exp_name':'gc_f_std8313',
'teacher_exp_name':'gc_o_tchr8323',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_bert',
'teacher_config':'small_gpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2bert_3')
config={'student_exp_name':'gc_f_std8314',
'teacher_exp_name':'gc_o_tchr8324',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_bert',
'teacher_config':'small_gpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2bert_4')
config={'student_exp_name':'gc_f_std8321',
'teacher_exp_name':'gc_o_tchr8321',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_gpt2',
'teacher_config':'small_gpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2gpt_1')
config={'student_exp_name':'gc_f_std8322',
'teacher_exp_name':'gc_o_tchr8322',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_gpt2',
'teacher_config':'small_gpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2gpt_2')
config={'student_exp_name':'gc_f_std8323',
'teacher_exp_name':'gc_o_tchr8323',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_gpt2',
'teacher_config':'small_gpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2gpt_3')
config={'student_exp_name':'gc_f_std8324',
'teacher_exp_name':'gc_o_tchr8324',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_gpt2',
'teacher_config':'small_gpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp8',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model[0])
labels.append('bert2gpt_4')
keys = labels
import tensorflow_probability as tfp
def test_for_calibration(model, task, n_bins=10):
preds = []
correct_class_probs = []
predicted_class_probs = []
pred_logits = []
y_trues = []
batch_count = task.n_valid_batches
for x, y in task.valid_dataset:
y = tf.cast(y, tf.int32)
logits = model(x)
pred_logits.extend(logits.numpy())
pred = tf.argmax(logits, axis=-1)
prob = task.get_probs_fn()(logits, labels=y, temperature=1)
preds.extend(pred.numpy())
y_trues.extend(y.numpy())
batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int32), dtype=tf.int32)
true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1)
pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1)
correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy())
predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy())
batch_count -= 1
if batch_count == 0:
break
model_accuracy = np.asarray(preds) == np.asarray(y_trues)
return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues
for key,model in zip(labels, students):
print('##################################')
train = model.evaluate(task.train_dataset, steps=task.n_train_batches,verbose=0)
valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches,verbose=0)
test = model.evaluate(task.test_dataset, steps=task.n_test_batches,verbose=0)
print(key)
print(train[0],'\t',train[1],'\t',train[2],'\t', valid[0],'\t', valid[1],'\t', valid[2], '\t', test[0], '\t', test[1], '\t', test[2])
# print("Teachers:")
# for key,model in zip(labels, models):
# print('##################################')
# model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20)
# model_ece = tfp.stats.expected_calibration_error(
# 20,
# logits=model_logits,
# labels_true=model_trues,
# )
# print(key, model_ece.numpy())
print("Students:")
for key,model in zip(labels, students):
print('##################################')
model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20)
model_ece = tfp.stats.expected_calibration_error(
20,
logits=model_logits,
labels_true=model_trues,
)
print(key, model_ece.numpy())
# infl_eng = inflect.engine()
# verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab')
# print(labels)
# for key,model in zip(labels, models):
# print('##################################')
# print(key)
# distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task)
# compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
| 19,508 | 33.962366 | 137 | py |
Reflect | Reflect-master/notebooks/eval_scripts/eval_vp-ugpt.py | import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
from notebook_utils import *
import pandas as pd
import seaborn as sns; sns.set()
from collections import Counter
from tqdm import tqdm
import logging
tf.get_logger().setLevel(logging.ERROR)
log_dir = "../logs"
chkpt_dir = "../tf_ckpts"
task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(batch_size=512),data_dir='../data')
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
models = []
students = []
labels = []
#Bert to LSTM
config={'student_exp_name':'gc_f_std4104',
'teacher_exp_name':'gc_o_tchr4112',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_lstm',
'teacher_config':'small_ugpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2lstm_1')
config={'student_exp_name':'gc_f_std4103',
'teacher_exp_name':'gc_o_tchr4113',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_lstm',
'teacher_config':'small_ugpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2lstm_2')
config={'student_exp_name':'gc_f_std4102',
'teacher_exp_name':'gc_o_tchr4102',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_lstm',
'teacher_config':'small_ugpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2lstm_3')
config={'student_exp_name':'gc_f_std4101',
'teacher_exp_name':'gc_o_tch4101',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_lstm',
'teacher_config':'small_ugpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2lstm_4')
config={'student_exp_name':'gc_f_std4131',
'teacher_exp_name':'gc_o_tchr4131',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_gpt2_shared',
'teacher_config':'small_ugpt_v9',
'student_config':'small_ugpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2ugpt_1')
config={'student_exp_name':'gc_f_std4132',
'teacher_exp_name':'gc_o_tchr4132',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_gpt2_shared',
'teacher_config':'small_ugpt_v9',
'student_config':'small_ugpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2ugpt_2')
config={'student_exp_name':'gc_f_std4130',
'teacher_exp_name':'gc_o_tchr4130',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_gpt2_shared',
'teacher_config':'small_ugpt_v9',
'student_config':'small_ugpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2ugpt_3')
config={'student_exp_name':'gc_f_std4133',
'teacher_exp_name':'gc_o_tchr4123',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_gpt2_shared',
'teacher_config':'small_ugpt_v9',
'student_config':'small_ugpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2ugpt_4')
config={'student_exp_name':'gc_f_std4110',
'teacher_exp_name':'gc_o_tchr4110',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_bert',
'teacher_config':'small_ugpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2bert_1')
config={'student_exp_name':'gc_f_std4111',
'teacher_exp_name':'gc_o_tchr4111',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_bert',
'teacher_config':'small_ugpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2bert_2')
config={'student_exp_name':'gc_f_std4112',
'teacher_exp_name':'gc_o_tchr4112',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_bert',
'teacher_config':'small_ugpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2bert_3')
config={'student_exp_name':'gc_f_std4113',
'teacher_exp_name':'gc_o_tchr4113',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_bert',
'teacher_config':'small_ugpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2bert_4')
config={'student_exp_name':'gc_f_std4120',
'teacher_exp_name':'gc_o_tchr4120',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_gpt2',
'teacher_config':'small_ugpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2gpt_1')
config={'student_exp_name':'gc_f_std4121',
'teacher_exp_name':'gc_o_tchr4121',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_gpt2',
'teacher_config':'small_ugpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2gpt_2')
config={'student_exp_name':'gc_f_std4122',
'teacher_exp_name':'gc_o_tchr4122',
'task_name':'word_sv_agreement_vp',
        'teacher_model':'cl_gpt2_shared',
'student_model':'cl_gpt2',
'teacher_config':'small_ugpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2gpt_3')
config={'student_exp_name':'gc_f_std4123',
'teacher_exp_name':'gc_o_tchr4123',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_gpt2',
'teacher_config':'small_ugpt_v9',
'student_config':'small_gpt_v9',
'distill_config':'pure_dstl_4_exp_vp4',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
students.append(student_model)
models.append(teacher_model)
labels.append('ugpt2gpt_4')
keys = labels
import tensorflow_probability as tfp
def test_for_calibration(model, task, n_bins=10):
preds = []
correct_class_probs = []
predicted_class_probs = []
pred_logits = []
y_trues = []
batch_count = task.n_valid_batches
for x, y in task.valid_dataset:
y = tf.cast(y, tf.int32)
logits = model(x)
pred_logits.extend(logits.numpy())
pred = tf.argmax(logits, axis=-1)
prob = task.get_probs_fn()(logits, labels=y, temperature=1)
preds.extend(pred.numpy())
y_trues.extend(y.numpy())
batch_indexes = tf.cast(tf.range(len(y), dtype=tf.int32), dtype=tf.int32)
true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1)
pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1)
correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy())
predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy())
batch_count -= 1
if batch_count == 0:
break
model_accuracy = np.asarray(preds) == np.asarray(y_trues)
return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues
# for key,model in zip(labels,models):
# model = model[0]
# print('##################################')
# train = model.evaluate(task.train_dataset, steps=task.n_train_batches, verbose=0)
# valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches, verbose=0)
# test = model.evaluate(task.test_dataset, steps=task.n_test_batches, verbose=0)
# print(key)
# print(train[0],'\t',train[1],'\t',train[2],'\t', valid[0],'\t', valid[1],'\t', valid[2], '\t', test[0], '\t', test[1], '\t', test[2])
print("Teachers ****")
for key,model in zip(labels,models):
model = model[0]
print('##################################')
print(key)
model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20)
model_ece = tfp.stats.expected_calibration_error(
20,
logits=model_logits,
labels_true=model_trues,
)
print(model_ece.numpy())
print("Students ****")
for key,model in zip(labels,students):
print('##################################')
print(key)
model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20)
model_ece = tfp.stats.expected_calibration_error(
20,
logits=model_logits,
labels_true=model_trues,
)
print(model_ece.numpy())
# infl_eng = inflect.engine()
# verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab')
# print(labels)
# for key,model in zip(labels,students):
# print('##################################')
# print(key)
# distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task)
# compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
| 19,655 | 33.851064 | 139 | py |
Reflect | Reflect-master/notebooks/eval_scripts/eval_vp-lstm.py | import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
from notebook_utils import *
import pandas as pd
import seaborn as sns; sns.set()
from collections import Counter
from tqdm import tqdm
import logging
tf.get_logger().setLevel(logging.ERROR)
log_dir = "../logs"
chkpt_dir = "../tf_ckpts"
task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(batch_size=512),data_dir='../data')
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
models = []
labels = []
#Bert to LSTM
config={'student_exp_name':'gc_f_std5004',
'teacher_exp_name':'gc_o_tchr5021',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp5',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
models.append(teacher_model)
labels.append('lstm2lstm_1')
config={'student_exp_name':'gc_f_std5001',
'teacher_exp_name':'gc_o_tchr5011',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp5',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
models.append(teacher_model)
labels.append('lstm2lstm_2')
config={'student_exp_name':'gc_f_std5002',
'teacher_exp_name':'gc_o_tchr5020',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp5',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
models.append(teacher_model)
labels.append('lstm2lstm_3')
config={'student_exp_name':'gc_f_std5003',
'teacher_exp_name':'gc_o_tchr5030',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_exp_vp5',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# config['distill_mode'] = 'online'
# config['student_exp_name'] = config['student_exp_name'].replace('_f_', '_o_')
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
std_hparams.output_attentions = True
std_hparams.output_embeddings = True
student_model, ckpt = get_student_model(config, task, std_hparams, cl_token)
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model = get_teacher_model(config, task, tchr_hparams, cl_token)
models.append(teacher_model)
labels.append('lstm2lstm_4')
keys = labels
import tensorflow_probability as tfp
def test_for_calibration(model, task, n_bins=10):
preds = []
correct_class_probs = []
predicted_class_probs = []
pred_logits = []
y_trues = []
batch_count = task.n_valid_batches
for x, y in task.valid_dataset:
y = tf.cast(y, tf.int32)
logits = model(x)
pred_logits.extend(logits.numpy())
pred = tf.argmax(logits, axis=-1)
prob = task.get_probs_fn()(logits, labels=y, temperature=1)
preds.extend(pred.numpy())
y_trues.extend(y.numpy())
    batch_indexes = tf.range(len(y), dtype=tf.int32)
true_indexes = tf.concat([batch_indexes[:,None], y[:,None]], axis=1)
pred_indexes = tf.concat([batch_indexes[:,None], tf.cast(pred[:,None], tf.int32)], axis=1)
correct_class_probs.extend(tf.gather_nd(prob, true_indexes).numpy())
predicted_class_probs.extend(tf.gather_nd(prob, pred_indexes).numpy())
batch_count -= 1
if batch_count == 0:
break
model_accuracy = np.asarray(preds) == np.asarray(y_trues)
return model_accuracy, predicted_class_probs, correct_class_probs, pred_logits, y_trues
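# Illustrative sketch (not part of the original notebook): given the arrays returned
# by test_for_calibration above, an expected-calibration-error estimate can be
# computed by binning the predicted-class probabilities and comparing per-bin
# confidence with per-bin accuracy. Only numpy is assumed here; the commented-out
# block below shows a tensorflow_probability based alternative.
def manual_expected_calibration_error(model_accuracy, predicted_class_probs, n_bins=10):
  accuracy = np.asarray(model_accuracy, dtype=np.float32)
  confidence = np.asarray(predicted_class_probs, dtype=np.float32)
  bin_edges = np.linspace(0.0, 1.0, n_bins + 1)
  ece = 0.0
  for lo, hi in zip(bin_edges[:-1], bin_edges[1:]):
    # bin membership by predicted-class confidence
    in_bin = (confidence > lo) & (confidence <= hi)
    if not np.any(in_bin):
      continue
    # weight each bin's |accuracy - confidence| gap by its share of samples
    ece += in_bin.mean() * np.abs(accuracy[in_bin].mean() - confidence[in_bin].mean())
  return ece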
# for key in keys:
# model = models[key]
# print('##################################')
# train = model.evaluate(task.train_dataset, steps=task.n_train_batches)
# valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches)
# test = model.evaluate(task.test_dataset, steps=task.n_test_batches)
# print(key)
# print(train[0],'\t',train[1],'\t',train[2],'\t', valid[0],'\t', valid[1],'\t', valid[2], '\t', test[0], '\t', test[1], '\t', test[2])
# for key in keys:
# model = models[key]
# print('##################################')
# model_accuracy, predicted_class_probs, correct_class_probs, model_logits, model_trues= test_for_calibration(model, task, n_bins=20)
# model_ece = tfp.stats.expected_calibration_error(
# 1000000,
# logits=model_logits,
# labels_true=model_trues,
# )
# print(model_ece.numpy())
infl_eng = inflect.engine()
verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab')
print(labels)
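# Note (added for clarity): get_teacher_model appears to return a (model, checkpoint)
# pair here, so each entry appended to `models` above is unpacked with model[0] below.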
for key, model in zip(labels, models):
model = model[0]
print('##################################')
print(key)
distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task)
compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total)
| 7,129 | 32.009259 | 139 | py |
Reflect | Reflect-master/notebooks/eval_scripts/eval_full_sv_cl.py | import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
from notebook_utils import *
import pandas as pd
import seaborn as sns; sns.set()
from collections import Counter
from tqdm import tqdm
log_dir = "../logs"
chkpt_dir = "../tf_ckpts"
task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(),data_dir='../data')
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
modelz = {}
ckptz = {}
config={'student_exp_name':'gc_f_std124',
'teacher_exp_name':'gc_o_tchr124',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
model, ckpt = get_student_model(config, task, std_hparams, cl_token)
modelz['l2l_std124'] = model
ckptz['l2l_std124'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['l2l_tchr124'] = teacher_model
ckptz['l2l_tchr124'] = teacher_ckpt
config={'student_exp_name':'gc_f_std125',
'teacher_exp_name':'gc_o_tchr125',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
model, ckpt = get_student_model(config, task, std_hparams, cl_token)
modelz['l2l_std125'] = model
ckptz['l2l_std125'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['l2l_tchr125'] = teacher_model
ckptz['l2l_tchr125'] = teacher_ckpt
config={'student_exp_name':'gc_f_std130',
'teacher_exp_name':'gc_o_tchr130',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
model, ckpt = get_student_model(config, task, std_hparams, cl_token)
modelz['l2l_std130'] = model
ckptz['l2l_std130'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['l2l_tchr130'] = teacher_model
ckptz['l2l_tchr130'] = teacher_ckpt
config={'student_exp_name':'gc_f_std131',
'teacher_exp_name':'gc_o_tchr131',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_lstm',
'student_model':'cl_lstm',
'teacher_config':'small_lstm_v4',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
model, ckpt = get_student_model(config, task, std_hparams, cl_token)
modelz['l2l_std131'] = model
ckptz['l2l_std131'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['l2l_tchr131'] = teacher_model
ckptz['l2l_tchr131'] = teacher_ckpt
infl_eng = inflect.engine()
verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab')
keys = modelz.keys()
for key in keys:
model = modelz[key]
print('##################################')
print(key, ckptz[key])
distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task)
compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total) | 4,451 | 29.285714 | 108 | py |
Reflect | Reflect-master/notebooks/eval_scripts/eval_lm.py | import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
from notebook_utils import *
from tqdm import tqdm
log_dir = "../logs"
chkpt_dir = "../tf_ckpts"
task = TASKS['word_sv_agreement_lm'](task_params=get_task_params(),data_dir='../data')
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
modelz = {}
ckptz = {}
config = {'model_name':'lm_lstm_shared_emb',
'model_config':'lstm_drop31_v2',
'learning_rate':0.001,
'exp_name':'lisa_crs_fst_offlineteacher_v23',
'chkpt_dir': '../tf_ckpts'
}
hparams=get_model_params(task, config['model_name'], config['model_config'])
hparams.output_attentions = True
hparams.output_embeddings = True
lstm1, lstm_ckpt1 = get_model(config, task, hparams, cl_token)
modelz['lstm1'] = lstm1
ckptz['lstm1'] = lstm_ckpt1
config = {'model_name':'lm_lstm_shared_emb',
'model_config':'lstm_drop31_v2',
'learning_rate':0.001,
'exp_name':'lisa_crs_fst_offlineteacher_v24',
'chkpt_dir': '../tf_ckpts'
}
hparams=get_model_params(task, config['model_name'], config['model_config'])
hparams.output_attentions = True
hparams.output_embeddings = True
lstm2, lstm_ckpt2 = get_model(config, task, hparams, cl_token)
modelz['lstm2'] = lstm2
ckptz['lstm2'] = lstm_ckpt2
config = {'model_name':'lm_lstm_shared_emb',
'model_config':'lstm_drop31_v2',
'learning_rate':0.001,
'exp_name':'lisa_crs_fst_offlineteacher_v25',
'chkpt_dir': '../tf_ckpts'
}
hparams=get_model_params(task, config['model_name'], config['model_config'])
hparams.output_attentions = True
hparams.output_embeddings = True
lstm3, lstm_ckpt3 = get_model(config, task, hparams, cl_token)
modelz['lstm3'] = lstm3
ckptz['lstm3'] = lstm_ckpt3
keys = ['lstm1', 'lstm2']
print("Evaluations ...")
for key in keys:
model = modelz[key]
print('##################################')
print(ckptz[key])
train = model.evaluate(task.train_dataset, steps=task.n_train_batches)
valid = model.evaluate(task.valid_dataset, steps=task.n_valid_batches)
test = model.evaluate(task.test_dataset, steps=task.n_test_batches)
print("train:", train)
print("valid:", valid)
print("test:", test) | 2,524 | 28.360465 | 86 | py |
Reflect | Reflect-master/notebooks/eval_scripts/eval_full_sv_cl_gpt2.py | import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
from notebook_utils import *
import pandas as pd
import seaborn as sns; sns.set()
from collections import Counter
from tqdm import tqdm
log_dir = "../logs"
chkpt_dir = "../tf_ckpts"
task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(),data_dir='../data')
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
modelz = {}
ckptz = {}
config={'student_exp_name':'gc_f_std144',
'teacher_exp_name':'gc_o_tchr144',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# std_hparams=get_model_params(task, config['student_model'], config['student_config'])
# model, ckpt = get_student_model(config, task, std_hparams, cl_token)
# modelz['g2l_std144'] = model
# ckptz['g2l_std144'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['ug2l_tchr144'] = teacher_model
ckptz['ug2l_tchr144'] = teacher_ckpt
config={'student_exp_name':'gc_f_std145',
'teacher_exp_name':'gc_o_tchr145',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# std_hparams=get_model_params(task, config['student_model'], config['student_config'])
# model, ckpt = get_student_model(config, task, std_hparams, cl_token)
# modelz['g2l_std145'] = model
# ckptz['g2l_std145'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['ug2l_tchr145'] = teacher_model
ckptz['ug2l_tchr145'] = teacher_ckpt
config={'student_exp_name':'gc_f_std146',
'teacher_exp_name':'gc_o_tchr146',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# std_hparams=get_model_params(task, config['student_model'], config['student_config'])
# model, ckpt = get_student_model(config, task, std_hparams, cl_token)
# modelz['g2l_std146'] = model
# ckptz['g2l_std146'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['ug2l_tchr146'] = teacher_model
ckptz['ug2l_tchr146'] = teacher_ckpt
config={'student_exp_name':'gc_f_std147',
'teacher_exp_name':'gc_o_tchr147',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_gpt2_shared',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
# std_hparams=get_model_params(task, config['student_model'], config['student_config'])
# model, ckpt = get_student_model(config, task, std_hparams, cl_token)
# modelz['g2l_std147'] = model
# ckptz['g2l_std147'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['ug2l_tchr147'] = teacher_model
ckptz['ug2l_tchr147'] = teacher_ckpt
infl_eng = inflect.engine()
verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab')
keys = modelz.keys()
for key in keys:
model = modelz[key]
print('##################################')
print(key, ckptz[key])
distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task)
compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total) | 4,517 | 29.322148 | 108 | py |
Reflect | Reflect-master/notebooks/eval_scripts/eval_full_sv_cl_bert.py | import os
import tensorflow as tf
from util import constants
from util.config_util import get_model_params, get_task_params, get_train_params
from tf2_models.trainer import Trainer
from absl import app
from absl import flags
import numpy as np
from util.models import MODELS
from util.tasks import TASKS
from notebook_utils import *
import pandas as pd
import seaborn as sns; sns.set()
from collections import Counter
from tqdm import tqdm
log_dir = "../logs"
chkpt_dir = "../tf_ckpts"
task = TASKS['word_sv_agreement_vp'](task_params=get_task_params(),data_dir='../data')
cl_token = task.databuilder.sentence_encoder().encode(constants.bos)
modelz = {}
ckptz = {}
config={'student_exp_name':'gc_f_std100',
'teacher_exp_name':'gc_o_tchr100',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
model, ckpt = get_student_model(config, task, std_hparams, cl_token)
modelz['b2l_std100'] = model
ckptz['b2l_std100'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['b2l_tchr100'] = teacher_model
ckptz['b2l_tchr100'] = teacher_ckpt
config={'student_exp_name':'gc_f_std101',
'teacher_exp_name':'gc_o_tchr101',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
model, ckpt = get_student_model(config, task, std_hparams, cl_token)
modelz['b2l_std101'] = model
ckptz['b2l_std101'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['b2l_tchr101'] = teacher_model
ckptz['b2l_tchr101'] = teacher_ckpt
config={'student_exp_name':'gc_f_std102',
'teacher_exp_name':'gc_o_tchr102',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
model, ckpt = get_student_model(config, task, std_hparams, cl_token)
modelz['b2l_std102'] = model
ckptz['b2l_std102'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['b2l_tchr102'] = teacher_model
ckptz['b2l_tchr102'] = teacher_ckpt
config={'student_exp_name':'gc_f_std103',
'teacher_exp_name':'gc_o_tchr103',
'task_name':'word_sv_agreement_vp',
'teacher_model':'cl_bert',
'student_model':'cl_lstm',
'teacher_config':'small_gpt_v9',
'student_config':'small_lstm_v4',
'distill_config':'pure_dstl_4_crs_slw',
'distill_mode':'offline',
'chkpt_dir':'../tf_ckpts',
}
std_hparams=get_model_params(task, config['student_model'], config['student_config'])
model, ckpt = get_student_model(config, task, std_hparams, cl_token)
modelz['b2l_std103'] = model
ckptz['b2l_std103'] = ckpt
tchr_hparams=get_model_params(task, config['teacher_model'], config['teacher_config'])
teacher_model, teacher_ckpt = get_teacher_model(config, task, tchr_hparams, cl_token)
modelz['b2l_tchr103'] = teacher_model
ckptz['b2l_tchr103'] = teacher_ckpt
infl_eng = inflect.engine()
verb_infl, noun_infl = gen_inflect_from_vocab(infl_eng, 'wiki.vocab')
keys = modelz.keys()
for key in keys:
model = modelz[key]
print('##################################')
print(key, ckptz[key])
distance_hits, distance_total, diff_hits, diff_total = evaluate_vp_cl(model, verb_infl, noun_infl, task)
compute_and_print_acc_stats(distance_hits, distance_total, diff_hits, diff_total) | 4,447 | 29.258503 | 108 | py |
Reflect | Reflect-master/prep_data/split.py | import sys
import os
import errno
import random
from util.text_util import deps_from_tsv, deps_to_tsv
def make_splits(fname, expr_dir, prop_train=0.1, prop_valid=0.01):
# for reproducibility
random.seed(42)
print('| read in the data')
data = deps_from_tsv(fname)
print('| shuffling')
random.shuffle(data)
n_train = int(len(data) * prop_train)
n_valid = int(len(data) * prop_valid)
train = data[:n_train]
valid = data[n_train: n_train+n_valid]
test = data[n_train+n_valid:]
try:
os.mkdir(expr_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
print('| splitting')
deps_to_tsv(train, os.path.join(expr_dir, 'train.tsv'))
deps_to_tsv(valid, os.path.join(expr_dir, 'valid.tsv'))
deps_to_tsv(test, os.path.join(expr_dir, 'test.tsv'))
print('| done!')
if __name__ == '__main__':
make_splits(sys.argv[1], sys.argv[2]) | 947 | 25.333333 | 66 | py |
Reflect | Reflect-master/prep_data/gen_bowman_logic.py | from itertools import chain
from itertools import combinations
from collections import Counter
import random
def powerset(iterable):
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
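# e.g. powerset([0, 1]) yields (), (0,), (1,), (0, 1)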
def get_candidate_worlds(num_vars):
return powerset(set(range(num_vars)))
def get_satisfying_worlds_for_tree(tree, candidate_worlds):
if isinstance(tree, tuple):
if tree[0] == 'not':
child = get_satisfying_worlds_for_tree(tree[1], candidate_worlds)
return candidate_worlds.difference(child)
else:
left = get_satisfying_worlds_for_tree(tree[0], candidate_worlds)
right = get_satisfying_worlds_for_tree(tree[2], candidate_worlds)
if tree[1] == "and":
return left.intersection(right)
elif tree[1] == "or":
return left.union(right)
else:
print('syntax error', tree)
else:
result = []
for world in candidate_worlds:
if tree in world:
result.append(world)
return set(result)
def compute_relation(left, right, universe):
ne_intersection = left.intersection(right)
ne_just_left = left.difference(right)
ne_just_right = right.difference(left)
ne_outside = universe.difference(left.union(right))
if (ne_intersection and not ne_just_right
and not ne_just_left and ne_outside):
return "="
elif (ne_intersection and ne_just_right
and not ne_just_left and ne_outside):
return "<"
elif (ne_intersection and not ne_just_right
and ne_just_left and ne_outside):
return ">"
elif (not ne_intersection and ne_just_right
and ne_just_left and not ne_outside):
return "^"
elif (not ne_intersection and ne_just_right
and ne_just_left and ne_outside):
return "|"
elif (ne_intersection and ne_just_right
and ne_just_left and not ne_outside):
return "v"
else:
return "#"
def create_sub_statement(universe, maxlen):
operator = random.choice(operators)
temp = ()
if operator == '0' or maxlen < 2:
temp = random.choice(list(universe))
else:
lhs = create_sub_statement(universe, maxlen / 2)
rhs = create_sub_statement(universe, maxlen / 2)
temp = tuple([lhs, operator, rhs])
neg_or_none = random.choice(neg_or_nones)
if neg_or_none == '0':
return temp
else:
return tuple([neg_or_none, temp])
def uniq(seq, idfun=None):
# order preserving
if idfun is None:
def idfun(x):
return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen:
continue
seen[marker] = 1
result.append(item)
return result
def to_string(expr, individuals):
if isinstance(expr, int):
return individuals[expr]
if isinstance(expr, str):
return expr
elif len(expr) == 3:
return "( " + to_string(expr[0], individuals) \
+ " ( " + to_string(expr[1], individuals) \
+ " " + to_string(expr[2], individuals) + " ) )"
else:
return "( " + to_string(expr[0], individuals) \
+ " " + to_string(expr[1], individuals) + " )"
def get_len(tree):
if isinstance(tree, tuple):
accum = 0
for entry in tree:
accum += get_len(entry)
return accum
elif tree == 'and' or tree == 'or' or tree == 'not':
return 1
else:
return 0
individuals = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
worlds = set(get_candidate_worlds(6))
universe = set(range(6))
neg_or_nones = ['not', '0', '0']
operators = ['and', 'or', 'and', 'or', '0', '0', '0', '0', '0']
stats = Counter()
total = 0
outputs = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [],
6: [], 7: [], 8: [], 9: [], 10: [], 11: [], 12: []}
while total < 600000:
    subuniverse = random.sample(sorted(universe), 4)
lhs = create_sub_statement(subuniverse, 12)
rhs = create_sub_statement(subuniverse, 12)
sat1 = get_satisfying_worlds_for_tree(lhs, worlds)
sat2 = get_satisfying_worlds_for_tree(rhs, worlds)
if sat1 == worlds or len(sat1) == 0:
continue
if sat2 == worlds or len(sat2) == 0:
continue
rel = compute_relation(sat1, sat2, worlds)
if rel != "?":
stats[rel] += 1
total += 1
max_len = min(max(get_len(rhs), get_len(lhs)), 12)
outputs[max_len].append("" + rel + "\t" + to_string(
lhs, individuals) + "\t" + to_string(rhs, individuals))
TRAIN_PORTION = 0.8
VALID_PORTION = 0.1
for length in outputs.keys():
outputs[length] = uniq(outputs[length])
total = len(outputs[length])
filename = 'train' + str(length)
f = open(filename, 'w')
for i in range(int(TRAIN_PORTION * total)):
output = outputs[length][i]
f.write(output + "\n")
f.close()
filename = 'valid' + str(length)
f = open(filename, 'w')
validx = int((TRAIN_PORTION + VALID_PORTION) * total)
for i in range(int(TRAIN_PORTION * total), validx):
output = outputs[length][i]
f.write(output + "\n")
f.close()
filename = 'test' + str(length)
f = open(filename, 'w')
for i in range(validx, total):
output = outputs[length][i]
f.write(output + "\n")
f.close()
print(stats) | 5,553 | 28.386243 | 77 | py |
Reflect | Reflect-master/prep_data/__init__.py | 0 | 0 | 0 | py |
|
Reflect | Reflect-master/prep_data/build_dictionary.py | from util import text_util as utils
from util import constants
from sys import argv
import numpy as np
import os
def build_and_save_dic(input_file, data_dir):
worddict = {}
worddict[constants.pad] = constants.pad_idx
worddict[constants.unk] = constants.unk_idx
worddict[constants.bos] = constants.bos_idx
worddict[constants.eos] = constants.eos_idx
input_file = os.path.join(data_dir, input_file)
for dep in utils.deps_from_tsv(input_file):
for w in dep['sentence'].split():
if w not in worddict:
worddict[w] = len(worddict)
vocab_file = os.path.join(data_dir, 'vocab')
print('| write vocabulary to %s' % vocab_file)
np.save(vocab_file, arr=worddict)
print('| vocabulary size %d' % len(worddict))
print('| done!')
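# Note (added for clarity, not part of the original script): np.save stores the dict
# as a pickled object in '<data_dir>/vocab.npy', so it can be read back with
# something like np.load(vocab_file + '.npy', allow_pickle=True).item(), where
# allow_pickle=True is required on recent NumPy versions.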
if __name__ == '__main__':
data_dir = argv[1]
input_file = argv[2]
build_and_save_dic(input_file=input_file,
data_dir=data_dir) | 969 | 26.714286 | 51 | py |
PyKrige | PyKrige-main/setup.py | # -*- coding: utf-8 -*-
"""Kriging Toolkit for Python."""
import os
import numpy as np
from Cython.Build import cythonize
from setuptools import Extension, setup
# cython extensions
CY_MODULES = [
Extension(
name=f"pykrige.{ext}",
sources=[os.path.join("src", "pykrige", *ext.split(".")) + ".pyx"],
include_dirs=[np.get_include()],
define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
)
for ext in ["lib.cok", "lib.variogram_models"]
]
# setup - do not include package data to ignore .pyx files in wheels
setup(ext_modules=cythonize(CY_MODULES), include_package_data=False)
| 634 | 27.863636 | 75 | py |
PyKrige | PyKrige-main/benchmarks/kriging_benchmarks.py | # -*- coding: utf-8 -*-
"""Benchmarks."""
from time import time
import numpy as np
from pykrige.ok import OrdinaryKriging
np.random.seed(19999)
VARIOGRAM_MODELS = ["power", "gaussian", "spherical", "exponential", "linear"]
BACKENDS = ["vectorized", "loop", "C"]
N_MOVING_WINDOW = [None, 10, 50, 100]
def make_benchmark(n_train, n_test, n_dim=2):
    """Compute the benchmarks for Ordinary Kriging.
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
Returns
-------
res : dict
a dictionary with the timing results
"""
X_train = np.random.rand(n_train, n_dim)
y_train = np.random.rand(n_train)
X_test = np.random.rand(n_test, n_dim)
res = {}
for variogram_model in VARIOGRAM_MODELS:
tic = time()
OK = OrdinaryKriging(
X_train[:, 0],
X_train[:, 1],
y_train,
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
res["t_train_{}".format(variogram_model)] = time() - tic
# All the following tests are performed with the linear variogram model
for backend in BACKENDS:
for n_closest_points in N_MOVING_WINDOW:
if backend == "vectorized" and n_closest_points is not None:
continue # this is not supported
tic = time()
OK.execute(
"points",
X_test[:, 0],
X_test[:, 1],
backend=backend,
n_closest_points=n_closest_points,
)
res["t_test_{}_{}".format(backend, n_closest_points)] = time() - tic
return res
def print_benchmark(n_train, n_test, n_dim, res):
"""Print the benchmarks.
Parameters
----------
n_train : int
number of points in the training set
n_test : int
number of points in the test set
n_dim : int
number of dimensions (default=2)
res : dict
a dictionary with the timing results
"""
print("=" * 80)
print(" " * 10, "N_dim={}, N_train={}, N_test={}".format(n_dim, n_train, n_test))
print("=" * 80)
print("\n", "# Training the model", "\n")
print("|".join(["{:>11} ".format(el) for el in ["t_train (s)"] + VARIOGRAM_MODELS]))
print("-" * (11 + 2) * (len(VARIOGRAM_MODELS) + 1))
print(
"|".join(
["{:>11} ".format("Training")]
+ [
"{:>11.2} ".format(el)
for el in [res["t_train_{}".format(mod)] for mod in VARIOGRAM_MODELS]
]
)
)
print("\n", "# Predicting kriging points", "\n")
print("|".join(["{:>11} ".format(el) for el in ["t_test (s)"] + BACKENDS]))
print("-" * (11 + 2) * (len(BACKENDS) + 1))
for n_closest_points in N_MOVING_WINDOW:
timing_results = [
res.get("t_test_{}_{}".format(mod, n_closest_points), "")
for mod in BACKENDS
]
print(
"|".join(
["{:>11} ".format("N_nn=" + str(n_closest_points))]
+ ["{:>11.2} ".format(el) for el in timing_results]
)
)
if __name__ == "__main__":
for no_train, no_test in [(400, 1000), (400, 2000), (800, 2000)]:
        results = make_benchmark(no_train, no_test)
print_benchmark(no_train, no_test, 2, results)
| 3,473 | 27.47541 | 88 | py |
PyKrige | PyKrige-main/examples/06_exact_values_example_1D.py | # -*- coding: utf-8 -*-
"""
Exact Values
============
PyKrige demonstration and usage
as a non-exact interpolator in 1D.
"""
import matplotlib.pyplot as plt
import numpy as np
from pykrige.ok import OrdinaryKriging
plt.style.use("ggplot")
np.random.seed(42)
x = np.linspace(0, 12.5, 50)
xpred = np.linspace(0, 12.5, 393)
y = np.sin(x) * np.exp(-0.25 * x) + np.random.normal(-0.25, 0.25, 50)
# compare OrdinaryKriging as an exact and non-exact interpolator
uk = OrdinaryKriging(
x, np.zeros(x.shape), y, variogram_model="linear", exact_values=False
)
uk_exact = OrdinaryKriging(x, np.zeros(x.shape), y, variogram_model="linear")
y_pred, y_std = uk.execute("grid", xpred, np.array([0.0]), backend="loop")
y_pred_exact, y_std_exact = uk_exact.execute(
"grid", xpred, np.array([0.0]), backend="loop"
)
y_pred = np.squeeze(y_pred)
y_std = np.squeeze(y_std)
y_pred_exact = np.squeeze(y_pred_exact)
y_std_exact = np.squeeze(y_std_exact)
fig, ax = plt.subplots(1, 1, figsize=(10, 4))
ax.scatter(x, y, label="Input Data")
ax.plot(xpred, y_pred_exact, label="Exact Prediction")
ax.plot(xpred, y_pred, label="Non Exact Prediction")
ax.fill_between(
xpred,
y_pred - 3 * y_std,
y_pred + 3 * y_std,
alpha=0.3,
label="Confidence interval",
)
ax.legend(loc=9)
ax.set_ylim(-1.8, 1.3)
ax.legend(loc=9)
plt.xlabel("X")
plt.ylabel("Field")
plt.show()
| 1,375 | 21.557377 | 77 | py |
PyKrige | PyKrige-main/examples/00_ordinary.py | """
Ordinary Kriging Example
========================
First we will create a 2D dataset together with the associated x, y grids.
"""
import matplotlib.pyplot as plt
import numpy as np
import pykrige.kriging_tools as kt
from pykrige.ok import OrdinaryKriging
data = np.array(
[
[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74],
]
)
gridx = np.arange(0.0, 5.5, 0.5)
gridy = np.arange(0.0, 5.5, 0.5)
###############################################################################
# Create the ordinary kriging object. Required inputs are the X-coordinates of
# the data points, the Y-coordinates of the data points, and the Z-values of the
# data points. If no variogram model is specified, defaults to a linear variogram
# model. If no variogram model parameters are specified, then the code automatically
# calculates the parameters by fitting the variogram model to the binned
# experimental semivariogram. The verbose kwarg controls code talk-back, and
# the enable_plotting kwarg controls the display of the semivariogram.
OK = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
###############################################################################
# Creates the kriged grid and the variance grid. Allows for kriging on a rectangular
# grid of points, on a masked rectangular grid of points, or with arbitrary points.
# (See OrdinaryKriging.__doc__ for more information.)
z, ss = OK.execute("grid", gridx, gridy)
###############################################################################
# Writes the kriged grid to an ASCII grid file and plot it.
kt.write_asc_grid(gridx, gridy, z, filename="output.asc")
plt.imshow(z)
plt.show()
| 1,840 | 30.20339 | 84 | py |
PyKrige | PyKrige-main/examples/07_regression_kriging2d.py | """
Regression kriging
------------------
An example of regression kriging
"""
import sys
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
from pykrige.rk import RegressionKriging
svr_model = SVR(C=0.1, gamma="auto")
rf_model = RandomForestRegressor(n_estimators=100)
lr_model = LinearRegression(normalize=True, copy_X=True, fit_intercept=False)
models = [svr_model, rf_model, lr_model]
try:
housing = fetch_california_housing()
except PermissionError:
# this dataset can occasionally fail to download on Windows
sys.exit(0)
# take the first 5000 as Kriging is memory intensive
p = housing["data"][:5000, :-2]
x = housing["data"][:5000, -2:]
target = housing["target"][:5000]
p_train, p_test, x_train, x_test, target_train, target_test = train_test_split(
p, x, target, test_size=0.3, random_state=42
)
for m in models:
print("=" * 40)
print("regression model:", m.__class__.__name__)
m_rk = RegressionKriging(regression_model=m, n_closest_points=10)
m_rk.fit(p_train, x_train, target_train)
print("Regression Score: ", m_rk.regression_model.score(p_test, target_test))
print("RK score: ", m_rk.score(p_test, x_test, target_test))
| 1,368 | 28.76087 | 81 | py |
PyKrige | PyKrige-main/examples/10_classification_kriging2d.py | """
Classification kriging
----------------------
An example of classification kriging
"""
import sys
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.svm import SVC
from pykrige.ck import ClassificationKriging
svc_model = SVC(C=0.1, gamma="auto", probability=True)
rf_model = RandomForestClassifier(n_estimators=100)
lr_model = LogisticRegression(max_iter=10000)
models = [svc_model, rf_model, lr_model]
try:
housing = fetch_california_housing()
except PermissionError:
# this dataset can occasionally fail to download on Windows
sys.exit(0)
# take the first 5000 as Kriging is memory intensive
p = housing["data"][:5000, :-2]
x = housing["data"][:5000, -2:]
target = housing["target"][:5000]
discretizer = KBinsDiscretizer(encode="ordinal")
target = discretizer.fit_transform(target.reshape(-1, 1))
p_train, p_test, x_train, x_test, target_train, target_test = train_test_split(
p, x, target, test_size=0.3, random_state=42
)
for m in models:
print("=" * 40)
print("classification model:", m.__class__.__name__)
m_ck = ClassificationKriging(classification_model=m, n_closest_points=10)
m_ck.fit(p_train, x_train, target_train)
print(
"Classification Score: ", m_ck.classification_model.score(p_test, target_test)
)
print("CK score: ", m_ck.score(p_test, x_test, target_test))
| 1,566 | 29.72549 | 86 | py |
PyKrige | PyKrige-main/examples/01_universal.py | """
Universal Kriging Example
=========================
In this example we apply a regional linear trend to the kriging system.
"""
import matplotlib.pyplot as plt
import numpy as np
from pykrige.uk import UniversalKriging
data = np.array(
[
[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74],
]
)
gridx = np.arange(0.0, 5.5, 0.5)
gridy = np.arange(0.0, 5.5, 0.5)
###############################################################################
# Create the universal kriging object. Required inputs are the X-coordinates of
# the data points, the Y-coordinates of the data points, and the Z-values of the
# data points. Variogram is handled as in the ordinary kriging case.
# drift_terms is a list of the drift terms to include; currently supported terms
# are 'regional_linear', 'point_log', and 'external_Z'. Refer to
# UniversalKriging.__doc__ for more information.
UK = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
###############################################################################
# Creates the kriged grid and the variance grid. Allows for kriging on a rectangular
# grid of points, on a masked rectangular grid of points, or with arbitrary points.
# (See UniversalKriging.__doc__ for more information.)
z, ss = UK.execute("grid", gridx, gridy)
plt.imshow(z)
plt.show()
| 1,475 | 27.941176 | 84 | py |
PyKrige | PyKrige-main/examples/08_krige_cv.py | # -*- coding: utf-8 -*-
"""
Krige CV
--------
Searching for optimal kriging parameters with cross validation
"""
import numpy as np
from sklearn.model_selection import GridSearchCV
from pykrige.rk import Krige
# 2D Kriging param opt
param_dict = {
"method": ["ordinary", "universal"],
"variogram_model": ["linear", "power", "gaussian", "spherical"],
# "nlags": [4, 6, 8],
# "weight": [True, False]
}
estimator = GridSearchCV(Krige(), param_dict, verbose=True, return_train_score=True)
# dummy data
X = np.random.randint(0, 400, size=(100, 2)).astype(float)
y = 5 * np.random.rand(100)
# run the gridsearch
estimator.fit(X=X, y=y)
if hasattr(estimator, "best_score_"):
print("best_score R² = {:.3f}".format(estimator.best_score_))
print("best_params = ", estimator.best_params_)
print("\nCV results::")
if hasattr(estimator, "cv_results_"):
for key in [
"mean_test_score",
"mean_train_score",
"param_method",
"param_variogram_model",
]:
print(" - {} : {}".format(key, estimator.cv_results_[key]))
# 3D Kriging param opt
param_dict3d = {
"method": ["ordinary3d", "universal3d"],
"variogram_model": ["linear", "power", "gaussian", "spherical"],
# "nlags": [4, 6, 8],
# "weight": [True, False]
}
estimator = GridSearchCV(Krige(), param_dict3d, verbose=True, return_train_score=True)
# dummy data
X3 = np.random.randint(0, 400, size=(100, 3)).astype(float)
y = 5 * np.random.rand(100)
# run the gridsearch
estimator.fit(X=X3, y=y)
if hasattr(estimator, "best_score_"):
print("best_score R² = {:.3f}".format(estimator.best_score_))
print("best_params = ", estimator.best_params_)
print("\nCV results::")
if hasattr(estimator, "cv_results_"):
for key in [
"mean_test_score",
"mean_train_score",
"param_method",
"param_variogram_model",
]:
print(" - {} : {}".format(key, estimator.cv_results_[key]))
| 1,951 | 23.708861 | 86 | py |
PyKrige | PyKrige-main/examples/02_kriging3D.py | """
Three-Dimensional Kriging Example
=================================
"""
import numpy as np
from matplotlib import pyplot as plt
from pykrige.ok3d import OrdinaryKriging3D
from pykrige.uk3d import UniversalKriging3D
data = np.array(
[
[0.1, 0.1, 0.3, 0.9],
[0.2, 0.1, 0.4, 0.8],
[0.1, 0.3, 0.1, 0.9],
[0.5, 0.4, 0.4, 0.5],
[0.3, 0.3, 0.2, 0.7],
]
)
gridx = np.arange(0.0, 0.6, 0.05)
gridy = np.arange(0.0, 0.6, 0.01)
gridz = np.arange(0.0, 0.6, 0.1)
###############################################################################
# Create the 3D ordinary kriging object and solves for the three-dimension kriged
# volume and variance. Refer to OrdinaryKriging3D.__doc__ for more information.
ok3d = OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k3d1, ss3d = ok3d.execute("grid", gridx, gridy, gridz)
###############################################################################
# Create the 3D universal kriging object and solves for the three-dimension kriged
# volume and variance. Refer to UniversalKriging3D.__doc__ for more information.
uk3d = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["regional_linear"],
)
k3d2, ss3d = uk3d.execute("grid", gridx, gridy, gridz)
###############################################################################
# To use the generic 'specified' drift term, the user must provide the drift values
# at each data point and at every grid point. The following example is equivalent to
# using a linear drift in all three spatial dimensions. Refer to
# UniversalKriging3D.__doc__ for more information.
zg, yg, xg = np.meshgrid(gridz, gridy, gridx, indexing="ij")
uk3d = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:, 0], data[:, 1], data[:, 2]],
)
k3d3, ss3d = uk3d.execute(
"grid", gridx, gridy, gridz, specified_drift_arrays=[xg, yg, zg]
)
###############################################################################
# To use the generic 'functional' drift term, the user must provide a callable
# function that takes only the spatial dimensions as arguments. The following example
# is equivalent to using a linear drift only in the x-direction. Refer to
# UniversalKriging3D.__doc__ for more information.
func = lambda x, y, z: x
uk3d = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func],
)
k3d4, ss3d = uk3d.execute("grid", gridx, gridy, gridz)
###############################################################################
# Note that the use of the 'specified' and 'functional' generic drift capabilities is
# essentially identical in the two-dimensional universal kriging class (except for a
# difference in the number of spatial coordinates for the passed drift functions).
# See UniversalKriging.__doc__ for more information.
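###############################################################################
# A minimal 2D sketch of the same idea (illustrative only, reusing the x column
# of the data above as a specified linear drift in the x-direction):

from pykrige.uk import UniversalKriging

uk2d = UniversalKriging(
    data[:, 0],
    data[:, 1],
    data[:, 3],
    variogram_model="linear",
    drift_terms=["specified"],
    specified_drift=[data[:, 0]],
)
yg2d, xg2d = np.meshgrid(gridy, gridx, indexing="ij")
k2d, ss2d = uk2d.execute("grid", gridx, gridy, specified_drift_arrays=[xg2d])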
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4)
ax1.imshow(k3d1[:, :, 0], origin="lower")
ax1.set_title("ordinary kriging")
ax2.imshow(k3d2[:, :, 0], origin="lower")
ax2.set_title("regional lin. drift")
ax3.imshow(k3d3[:, :, 0], origin="lower")
ax3.set_title("specified drift")
ax4.imshow(k3d4[:, :, 0], origin="lower")
ax4.set_title("functional drift")
plt.tight_layout()
plt.show()
| 3,514 | 32.47619 | 85 | py |
PyKrige | PyKrige-main/examples/05_kriging_1D.py | """
1D Kriging
==========
An example of 1D kriging with PyKrige
"""
import matplotlib.pyplot as plt
import numpy as np
from pykrige import OrdinaryKriging
plt.style.use("ggplot")
# fmt: off
# Data taken from
# https://blog.dominodatalab.com/fitting-gaussian-process-models-python/
X, y = np.array([
[-5.01, 1.06], [-4.90, 0.92], [-4.82, 0.35], [-4.69, 0.49], [-4.56, 0.52],
[-4.52, 0.12], [-4.39, 0.47], [-4.32,-0.19], [-4.19, 0.08], [-4.11,-0.19],
[-4.00,-0.03], [-3.89,-0.03], [-3.78,-0.05], [-3.67, 0.10], [-3.59, 0.44],
[-3.50, 0.66], [-3.39,-0.12], [-3.28, 0.45], [-3.20, 0.14], [-3.07,-0.28],
[-3.01,-0.46], [-2.90,-0.32], [-2.77,-1.58], [-2.69,-1.44], [-2.60,-1.51],
[-2.49,-1.50], [-2.41,-2.04], [-2.28,-1.57], [-2.19,-1.25], [-2.10,-1.50],
[-2.00,-1.42], [-1.91,-1.10], [-1.80,-0.58], [-1.67,-1.08], [-1.61,-0.79],
[-1.50,-1.00], [-1.37,-0.04], [-1.30,-0.54], [-1.19,-0.15], [-1.06,-0.18],
[-0.98,-0.25], [-0.87,-1.20], [-0.78,-0.49], [-0.68,-0.83], [-0.57,-0.15],
[-0.50, 0.00], [-0.38,-1.10], [-0.29,-0.32], [-0.18,-0.60], [-0.09,-0.49],
[0.03 ,-0.50], [0.09 ,-0.02], [0.20 ,-0.47], [0.31 ,-0.11], [0.41 ,-0.28],
[0.53 , 0.40], [0.61 , 0.11], [0.70 , 0.32], [0.94 , 0.42], [1.02 , 0.57],
[1.13 , 0.82], [1.24 , 1.18], [1.30 , 0.86], [1.43 , 1.11], [1.50 , 0.74],
[1.63 , 0.75], [1.74 , 1.15], [1.80 , 0.76], [1.93 , 0.68], [2.03 , 0.03],
[2.12 , 0.31], [2.23 ,-0.14], [2.31 ,-0.88], [2.40 ,-1.25], [2.50 ,-1.62],
[2.63 ,-1.37], [2.72 ,-0.99], [2.80 ,-1.92], [2.83 ,-1.94], [2.91 ,-1.32],
[3.00 ,-1.69], [3.13 ,-1.84], [3.21 ,-2.05], [3.30 ,-1.69], [3.41 ,-0.53],
[3.52 ,-0.55], [3.63 ,-0.92], [3.72 ,-0.76], [3.80 ,-0.41], [3.91 , 0.12],
[4.04 , 0.25], [4.13 , 0.16], [4.24 , 0.26], [4.32 , 0.62], [4.44 , 1.69],
[4.52 , 1.11], [4.65 , 0.36], [4.74 , 0.79], [4.84 , 0.87], [4.93 , 1.01],
[5.02 , 0.55]
]).T
# fmt: on
X_pred = np.linspace(-6, 6, 200)
# pykrige doesn't support 1D data for now, only 2D or 3D
# adapting the 1D input to 2D
uk = OrdinaryKriging(X, np.zeros(X.shape), y, variogram_model="gaussian")
y_pred, y_std = uk.execute("grid", X_pred, np.array([0.0]))
y_pred = np.squeeze(y_pred)
y_std = np.squeeze(y_std)
fig, ax = plt.subplots(1, 1, figsize=(10, 4))
ax.scatter(X, y, s=40, label="Input data")
ax.plot(X_pred, y_pred, label="Predicted values")
ax.fill_between(
X_pred,
y_pred - 3 * y_std,
y_pred + 3 * y_std,
alpha=0.3,
label="Confidence interval",
)
ax.legend(loc=9)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_xlim(-6, 6)
ax.set_ylim(-2.8, 3.5)
plt.show()
| 2,626 | 36 | 79 | py |
PyKrige | PyKrige-main/examples/04_krige_geometric.py | # -*- coding: utf-8 -*-
"""
Geometric example
=================
A small example script showing the usage of the 'geographic' coordinates type
for ordinary kriging on a sphere.
"""
import numpy as np
from matplotlib import pyplot as plt
from pykrige.ok import OrdinaryKriging
# Make this example reproducible:
np.random.seed(89239413)
# Generate random data following a uniform spatial distribution
# of nodes and a uniform distribution of values in the interval
# [2.0, 5.5]:
N = 7
lon = 360.0 * np.random.random(N)
lat = 180.0 / np.pi * np.arcsin(2 * np.random.random(N) - 1)
z = 3.5 * np.random.rand(N) + 2.0
# Generate a regular grid with 60° longitude and 30° latitude steps:
grid_lon = np.linspace(0.0, 360.0, 7)
grid_lat = np.linspace(-90.0, 90.0, 7)
# Create ordinary kriging object:
OK = OrdinaryKriging(
lon,
lat,
z,
variogram_model="linear",
verbose=False,
enable_plotting=False,
coordinates_type="geographic",
)
# Execute on grid:
z1, ss1 = OK.execute("grid", grid_lon, grid_lat)
# Create ordinary kriging object ignoring curvature:
OK = OrdinaryKriging(
lon, lat, z, variogram_model="linear", verbose=False, enable_plotting=False
)
# Execute on grid:
z2, ss2 = OK.execute("grid", grid_lon, grid_lat)
###############################################################################
# Print data at 60° latitude (last longitude index will show periodicity):
print("Original data:")
print("Longitude:", lon.astype(int))
print("Latitude: ", lat.astype(int))
print("z: ", np.array_str(z, precision=2))
print("\nKrige at 60° latitude:\n======================")
print("Longitude:", grid_lon)
print("Value: ", np.array_str(z1[5, :], precision=2))
print("Sigma²: ", np.array_str(ss1[5, :], precision=2))
print("\nIgnoring curvature:\n=====================")
print("Value: ", np.array_str(z2[5, :], precision=2))
print("Sigma²: ", np.array_str(ss2[5, :], precision=2))
###############################################################################
# We can see that the data point at longitude 122, latitude 50 correctly
# dominates the kriged results, since it is the closest node in spherical
# distance metric, as longitude differences scale with cos(latitude).
# When kriging using longitude / latitude linearly, grid points that are far
# away in longitude are incorrectly weighted the same as grid points that are
# equally far away in latitude.
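###############################################################################
# A small numerical sketch of that scaling (illustrative only): near 50° latitude
# a 10° step in longitude covers roughly cos(50°) ~ 0.64 times the great-circle
# distance of a 10° step in latitude, which is what the geographic metric respects
# and the naive euclidean treatment of lon/lat ignores.

lat0 = np.deg2rad(50.0)
step = np.deg2rad(10.0)
lon_step_km = 6371.0 * step * np.cos(lat0)
lat_step_km = 6371.0 * step
print("10 deg of longitude at 50N: about %.0f km" % lon_step_km)
print("10 deg of latitude: about %.0f km" % lat_step_km)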
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(z1, extent=[0, 360, -90, 90], origin="lower")
ax1.set_title("geo-coordinates")
ax2.imshow(z2, extent=[0, 360, -90, 90], origin="lower")
ax2.set_title("non geo-coordinates")
plt.show()
| 2,636 | 31.555556 | 79 | py |
PyKrige | PyKrige-main/examples/03_gstools_covmodel.py | # -*- coding: utf-8 -*-
"""
GSTools Interface
=================
Example of how to use the PyKrige routines with a GSTools CovModel.
"""
import gstools as gs
import numpy as np
from matplotlib import pyplot as plt
from pykrige.ok import OrdinaryKriging
# conditioning data
data = np.array(
[
[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74],
]
)
# grid definition for output field
gridx = np.arange(0.0, 5.5, 0.1)
gridy = np.arange(0.0, 6.5, 0.1)
# a GSTools based covariance model
cov_model = gs.Gaussian(dim=2, len_scale=4, anis=0.2, angles=-0.5, var=0.5, nugget=0.1)
# ordinary kriging with pykrige
OK1 = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], cov_model)
z1, ss1 = OK1.execute("grid", gridx, gridy)
plt.imshow(z1, origin="lower")
plt.show()
| 844 | 23.852941 | 87 | py |
PyKrige | PyKrige-main/src/pykrige/ok.py | # coding: utf-8
"""
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Contains class OrdinaryKriging, which provides easy access to
2D Ordinary Kriging.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
Copyright (c) 2015-2020, PyKrige Developers
"""
import warnings
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
from . import core, variogram_models
from .compat_gstools import validate_gstools
from .core import (
P_INV,
_adjust_for_anisotropy,
_find_statistics,
_initialize_variogram_model,
_make_variogram_parameter_list,
)
class OrdinaryKriging:
r"""Convenience class for easy access to 2D Ordinary Kriging.
Parameters
----------
x : array_like
X-coordinates of data points.
y : array_like
Y-coordinates of data points.
z : array-like
Values at data points.
variogram_model : str or GSTools CovModel, optional
Specifies which variogram model to use; may be one of the following:
linear, power, gaussian, spherical, exponential, hole-effect.
Default is linear variogram model. To utilize a custom variogram model,
specify 'custom'; you must also provide variogram_parameters and
variogram_function. Note that the hole-effect model is only technically
correct for one-dimensional problems.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
Parameters that define the specified variogram model. If not provided,
parameters will be automatically calculated using a "soft" L1 norm
minimization scheme. For variogram model parameters provided in a dict,
the required dict keys vary according to the specified variogram
model: ::
# linear
{'slope': slope, 'nugget': nugget}
# power
{'scale': scale, 'exponent': exponent, 'nugget': nugget}
# gaussian, spherical, exponential and hole-effect:
{'sill': s, 'range': r, 'nugget': n}
# OR
{'psill': p, 'range': r, 'nugget': n}
Note that either the full sill or the partial sill
(psill = sill - nugget) can be specified in the dict.
For variogram model parameters provided in a list, the entries
must be as follows: ::
# linear
[slope, nugget]
# power
[scale, exponent, nugget]
# gaussian, spherical, exponential and hole-effect:
[sill, range, nugget]
Note that the full sill (NOT the partial sill) must be specified
in the list format.
For a custom variogram model, the parameters are required, as custom
variogram models will not automatically be fit to the data.
Furthermore, the parameters must be specified in list format, in the
order in which they are used in the callable function (see
variogram_function for more information). The code does not check
that the provided list contains the appropriate number of parameters
for the custom variogram model, so an incorrect parameter list in
such a case will probably trigger an esoteric exception someplace
deep in the code.
NOTE that, while the list format expects the full sill, the code
itself works internally with the partial sill.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. The function must take only two arguments:
first, a list of parameters for the variogram model; second, the
distances at which to calculate the variogram model. The list
provided in variogram_parameters will be passed to the function
as the first argument.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : bool, optional
Flag that specifies if semivariance at smaller lags should be weighted
more heavily when automatically calculating variogram model.
The routine is currently hard-coded such that the weights are
calculated from a logistic function, so weights at small lags are ~1
and weights at the longest lags are ~0; the center of the logistic
weighting is hard-coded to be at 70% of the distance from the shortest
lag to the largest lag. Setting this parameter to True indicates that
weights will be applied. Default is False. (Kitanidis suggests that the
values at smaller lags are more important in fitting a variogram model,
so the option is provided to enable such weighting.)
anisotropy_scaling : float, optional
Scalar stretching value to take into account anisotropy.
Default is 1 (effectively no stretching).
Scaling is applied in the y-direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle, if anisotropy_angle
        is not 0). This parameter has no effect if coordinates_type is
set to 'geographic'.
anisotropy_angle : float, optional
CCW angle (in degrees) by which to rotate coordinate system in
order to take into account anisotropy. Default is 0 (no rotation).
Note that the coordinate system is rotated. This parameter has
        no effect if coordinates_type is set to 'geographic'.
verbose : bool, optional
Enables program text output to monitor kriging process.
Default is False (off).
enable_plotting : bool, optional
Enables plotting to display variogram. Default is False (off).
enable_statistics : bool, optional
Default is False
coordinates_type : str, optional
One of 'euclidean' or 'geographic'. Determines if the x and y
coordinates are interpreted as on a plane ('euclidean') or as
coordinates on a sphere ('geographic'). In case of geographic
coordinates, x is interpreted as longitude and y as latitude
coordinates, both given in degree. Longitudes are expected in
[0, 360] and latitudes in [-90, 90]. Default is 'euclidean'.
exact_values : bool, optional
If True, interpolation provides input values at input locations.
If False, interpolation accounts for variance/nugget within input
values at input locations and does not behave as an
exact-interpolator [2]. Note that this only has an effect if
there is variance/nugget present within the input data since it is
interpreted as measurement error. If the nugget is zero, the kriged
field will behave as an exact interpolator.
pseudo_inv : :class:`bool`, optional
Whether the kriging system is solved with the pseudo inverted
kriging matrix. If `True`, this leads to more numerical stability
and redundant points are averaged. But it can take more time.
Default: False
pseudo_inv_type : :class:`str`, optional
Here you can select the algorithm to compute the pseudo-inverse matrix:
* `"pinv"`: use `pinv` from `scipy` which uses `lstsq`
* `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values
Default: `"pinv"`
References
----------
    .. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
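
    Examples
    --------
    A minimal usage sketch (the arrays ``x``, ``y``, ``z``, ``grid_x`` and
    ``grid_y`` here are purely illustrative)::

        from pykrige.ok import OrdinaryKriging

        OK = OrdinaryKriging(x, y, z, variogram_model="linear")
        z_interp, sigma_sq = OK.execute("grid", grid_x, grid_y)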
"""
eps = 1.0e-10 # Cutoff for comparison to zero
variogram_dict = {
"linear": variogram_models.linear_variogram_model,
"power": variogram_models.power_variogram_model,
"gaussian": variogram_models.gaussian_variogram_model,
"spherical": variogram_models.spherical_variogram_model,
"exponential": variogram_models.exponential_variogram_model,
"hole-effect": variogram_models.hole_effect_variogram_model,
}
def __init__(
self,
x,
y,
z,
variogram_model="linear",
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling=1.0,
anisotropy_angle=0.0,
verbose=False,
enable_plotting=False,
enable_statistics=False,
coordinates_type="euclidean",
exact_values=True,
pseudo_inv=False,
pseudo_inv_type="pinv",
):
# config the pseudo inverse
self.pseudo_inv = bool(pseudo_inv)
self.pseudo_inv_type = str(pseudo_inv_type)
if self.pseudo_inv_type not in P_INV:
raise ValueError("pseudo inv type not valid: " + str(pseudo_inv_type))
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
if not isinstance(exact_values, bool):
raise ValueError("exact_values has to be boolean True or False")
self.exact_values = exact_values
self.coordinates_type = coordinates_type
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
validate_gstools(self.model)
if self.model.field_dim == 3:
raise ValueError("GSTools: model dim is not 1 or 2")
# check if coordinate types match
if self.model.latlon and (self.coordinates_type == "euclidean"):
raise ValueError(
"GSTools: latlon models require geographic coordinates"
)
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling = self.model.pykrige_anis
anisotropy_angle = self.model.pykrige_angle
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
# Code assumes 1D input arrays of floats. Ensures that any extraneous
# dimensions don't get in the way. Copies are created to avoid any
# problems with referencing the original passed arguments.
# Also, values are forced to be float... in the future, might be worth
# developing complex-number kriging (useful for vector field kriging)
self.X_ORIG = np.atleast_1d(
np.squeeze(np.array(x, copy=True, dtype=np.float64))
)
self.Y_ORIG = np.atleast_1d(
np.squeeze(np.array(y, copy=True, dtype=np.float64))
)
self.Z = np.atleast_1d(np.squeeze(np.array(z, copy=True, dtype=np.float64)))
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print("Plotting Enabled\n")
# adjust for anisotropy... only implemented for euclidean (rectangular)
# coordinates, as anisotropy is ambiguous for geographic coordinates...
if self.coordinates_type == "euclidean":
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG)) / 2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG)) / 2.0
self.anisotropy_scaling = anisotropy_scaling
self.anisotropy_angle = anisotropy_angle
if self.verbose:
print("Adjusting data for anisotropy...")
self.X_ADJUSTED, self.Y_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG)).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle],
).T
elif self.coordinates_type == "geographic":
# Leave everything as is in geographic case.
# May be open to discussion?
if anisotropy_scaling != 1.0:
warnings.warn(
"Anisotropy is not compatible with geographic "
"coordinates. Ignoring user set anisotropy.",
UserWarning,
)
self.XCENTER = 0.0
self.YCENTER = 0.0
self.anisotropy_scaling = 1.0
self.anisotropy_angle = 0.0
self.X_ADJUSTED = self.X_ORIG
self.Y_ADJUSTED = self.Y_ORIG
else:
raise ValueError(
"Only 'euclidean' and 'geographic' are valid "
"values for coordinates-keyword."
)
if self.verbose:
print("Initializing variogram model...")
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T,
self.Z,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
self.coordinates_type,
)
if self.verbose:
print("Coordinates type: '%s'" % self.coordinates_type, "\n")
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
if enable_statistics:
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T,
self.Z,
self.variogram_function,
self.variogram_model_parameters,
self.coordinates_type,
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
else:
self.delta, self.sigma, self.epsilon, self.Q1, self.Q2, self.cR = [None] * 6
def update_variogram_model(
self,
variogram_model,
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling=1.0,
anisotropy_angle=0.0,
):
"""Allows user to update variogram type and/or
variogram model parameters.
Parameters
        ----------
variogram_model : str or GSTools CovModel
May be any of the variogram models listed
above. May also be 'custom', in which case variogram_parameters
and variogram_function must be specified.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
List or dict of
variogram model parameters, as explained above. If not provided,
a best fit model will be calculated as described above.
variogram_function : callable, optional
A callable function that must
be provided if variogram_model is specified as 'custom'.
See above for more information.
nlags : int, optional
Number of averaging bins for the semivariogram.
Default is 6.
weight : boolean, optional
Flag that specifies if semivariance at
smaller lags should be weighted more heavily when automatically
calculating the variogram model. See above for more information.
True indicates that weights will be applied. Default is False.
anisotropy_scaling : float, optional
Scalar stretching value to
take into account anisotropy. Default is 1 (effectively no
stretching). Scaling is applied in the y-direction.
anisotropy_angle : float, optional
CCW angle (in degrees) by
which to rotate coordinate system in order to take into
account anisotropy. Default is 0 (no rotation).
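
        Examples
        --------
        A short sketch (``ok`` is a hypothetical, already-constructed kriging
        object, e.g. built as in the class-level example)::

            ok.update_variogram_model("spherical", nlags=10, weight=True)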
"""
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
validate_gstools(self.model)
if self.model.field_dim == 3:
raise ValueError("GSTools: model dim is not 1 or 2")
# check if coordinate types match
if self.model.latlon and (self.coordinates_type == "euclidean"):
raise ValueError(
"GSTools: latlon models require geographic coordinates"
)
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling = self.model.pykrige_anis
anisotropy_angle = self.model.pykrige_angle
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if (
anisotropy_scaling != self.anisotropy_scaling
or anisotropy_angle != self.anisotropy_angle
):
if self.coordinates_type == "euclidean":
if self.verbose:
print("Adjusting data for anisotropy...")
self.anisotropy_scaling = anisotropy_scaling
self.anisotropy_angle = anisotropy_angle
self.X_ADJUSTED, self.Y_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG)).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle],
).T
elif self.coordinates_type == "geographic":
if anisotropy_scaling != 1.0:
warnings.warn(
"Anisotropy is not compatible with geographic"
" coordinates. Ignoring user set anisotropy.",
UserWarning,
)
self.anisotropy_scaling = 1.0
self.anisotropy_angle = 0.0
self.X_ADJUSTED = self.X_ORIG
self.Y_ADJUSTED = self.Y_ORIG
if self.verbose:
print("Updating variogram mode...")
# See note above about the 'use_psill' kwarg...
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T,
self.Z,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
self.coordinates_type,
)
if self.verbose:
print("Coordinates type: '%s'" % self.coordinates_type, "\n")
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T,
self.Z,
self.variogram_function,
self.variogram_model_parameters,
self.coordinates_type,
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, "r*")
ax.plot(
self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags),
"k-",
)
plt.show()
def get_variogram_points(self):
"""Returns both the lags and the variogram function evaluated at each
of them.
The evaluation of the variogram function and the lags are produced
        internally. This method is convenient when the user wants access to
the lags and the resulting variogram (according to the model provided)
for further analysis.
Returns
-------
(tuple) tuple containing:
lags (array) - the lags at which the variogram was evaluated
variogram (array) - the variogram function evaluated at the lags
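
        Examples
        --------
        A sketch of inspecting the fitted model (``ok`` is a hypothetical,
        already-constructed kriging object)::

            lags, gamma = ok.get_variogram_points()
            # gamma can then be plotted against lags, or compared with the
            # binned semivariance, as a quick goodness-of-fit check.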
"""
return (
self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags),
)
def switch_verbose(self):
"""Allows user to switch code talk-back on/off. Takes no arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Allows user to switch plot display on/off. Takes no arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c="k", marker="*")
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
"""Returns the Q1, Q2, and cR statistics for the variogram fit
(in that order). No arguments.
"""
return self.Q1, self.Q2, self.cR
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
def _get_kriging_matrix(self, n):
"""Assembles the kriging matrix."""
if self.coordinates_type == "euclidean":
xy = np.concatenate(
(self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1
)
d = cdist(xy, xy, "euclidean")
elif self.coordinates_type == "geographic":
d = core.great_circle_distance(
self.X_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.X_ADJUSTED,
self.Y_ADJUSTED,
)
a = np.zeros((n + 1, n + 1))
a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
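        # The trailing row/column of ones enforces the unbiasedness constraint
        # (kriging weights sum to one); the corner entry is the slot for the
        # Lagrange multiplier and stays zero.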
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
return a
def _exec_vector(self, a, bd, mask):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
b = np.zeros((npt, n + 1, 1))
b[:, :n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], zero_index[1], 0] = 0.0
b[:, n, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n + 1, axis=1)
b = np.ma.array(b, mask=mask_b)
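        # Solve for the kriging weights (plus Lagrange multiplier) of all
        # query points at once: the weighted sum of Z gives the estimates,
        # and the product of the solution with -b gives the kriging variances.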
x = np.dot(a_inv, b.reshape((npt, n + 1)).T).reshape((1, n + 1, npt)).T
zvalues = np.sum(x[:, :n, 0] * self.Z, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return zvalues, sigmasq
def _exec_loop(self, a, bd_all, mask):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
zvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[
0
]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_index = None
zero_value = False
b = np.zeros((n + 1, 1))
b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = np.dot(a_inv, b)
zvalues[j] = np.sum(x[:n, 0] * self.Z)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return zvalues, sigmasq
def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
import scipy.linalg.lapack
npt = bd_all.shape[0]
n = bd_idx.shape[1]
zvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
for i in np.nonzero(~mask)[
0
]: # Note that this is the same thing as range(npt) if mask is not defined,
b_selector = bd_idx[i] # otherwise it takes the non-masked elements.
bd = bd_all[i]
a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1])))
a = a_all[a_selector[:, None], a_selector]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_index = None
zero_value = False
b = np.zeros((n + 1, 1))
b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = scipy.linalg.solve(a, b)
zvalues[i] = x[:n, 0].dot(self.Z[b_selector])
sigmasq[i] = -x[:, 0].dot(b[:, 0])
return zvalues, sigmasq
def execute(
self,
style,
xpoints,
ypoints,
mask=None,
backend="vectorized",
n_closest_points=None,
):
"""Calculates a kriged grid and the associated variance.
Parameters
----------
style : str
Specifies how to treat input kriging points. Specifying 'grid'
treats xpoints and ypoints as two arrays of x and y coordinates
that define a rectangular grid. Specifying 'points' treats
xpoints and ypoints as two arrays that provide coordinate pairs
at which to solve the kriging system. Specifying 'masked'
treats xpoints and ypoints as two arrays of x and y coordinates
that define a rectangular grid and uses mask to only evaluate
specific points in the grid.
xpoints : array_like, shape (N,) or (N, 1)
            If style is specified as 'grid' or 'masked',
x-coordinates of MxN grid. If style is specified as 'points',
x-coordinates of specific points at which to solve
kriging system.
ypoints : array_like, shape (M,) or (M, 1)
If style is specified as 'grid' or 'masked',
y-coordinates of MxN grid. If style is specified as 'points',
y-coordinates of specific points at which to solve kriging
system. Note that in this case, xpoints and ypoints must have
the same dimensions (i.e., M = N).
mask : bool, array_like, shape (M, N), optional
Specifies the points in the rectangular grid defined
by xpoints and ypoints that are to be excluded in the
kriging calculations. Must be provided if style is specified
as 'masked'. False indicates that the point should not be
masked, so the kriging system will be solved at the point.
True indicates that the point should be masked, so the kriging
            system will not be solved at the point.
backend : str, optional
Specifies which approach to use in kriging.
Specifying 'vectorized' will solve the entire kriging problem
at once in a vectorized operation. This approach is faster but
also can consume a significant amount of memory for large grids
and/or large datasets. Specifying 'loop' will loop through each
point at which the kriging system is to be solved.
This approach is slower but also less memory-intensive.
Specifying 'C' will utilize a loop in Cython.
Default is 'vectorized'.
n_closest_points : int, optional
For kriging with a moving window, specifies the number of
nearby points to use in the calculation. This can speed up the
calculation for large datasets, but should be used
with caution. As Kitanidis notes, kriging with a moving window
can produce unexpected oddities if the variogram model
is not carefully chosen.
Returns
-------
zvalues : ndarray, shape (M, N) or (N, 1)
Z-values of specified grid or at the specified set of points.
If style was specified as 'masked', zvalues will
be a numpy masked array.
sigmasq : ndarray, shape (M, N) or (N, 1)
Variance at specified grid points or at the specified
set of points. If style was specified as 'masked', sigmasq
will be a numpy masked array.
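
        Examples
        --------
        A minimal sketch, reusing a hypothetical ``ok`` object built as in the
        class-level example::

            gridx = np.arange(0.0, 5.5, 0.5)
            gridy = np.arange(0.0, 5.5, 0.5)
            # memory-friendly loop backend
            z, ss = ok.execute("grid", gridx, gridy, backend="loop")
            # moving-window variant using only the 3 nearest observations
            z, ss = ok.execute("grid", gridx, gridy, backend="loop",
                               n_closest_points=3)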
"""
if self.verbose:
print("Executing Ordinary Kriging...\n")
if style != "grid" and style != "masked" and style != "points":
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
if n_closest_points is not None and n_closest_points <= 1:
# If this is not checked, nondescriptive errors emerge
# later in the code.
raise ValueError("n_closest_points has to be at least two!")
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
n = self.X_ADJUSTED.shape[0]
nx = xpts.size
ny = ypts.size
a = self._get_kriging_matrix(n)
if style in ["grid", "masked"]:
if style == "masked":
if mask is None:
raise IOError(
"Must specify boolean masking array when style is 'masked'."
)
if mask.shape[0] != ny or mask.shape[1] != nx:
if mask.shape[0] == nx and mask.shape[1] == ny:
mask = mask.T
else:
raise ValueError(
"Mask dimensions do not match specified grid dimensions."
)
mask = mask.flatten()
npt = ny * nx
grid_x, grid_y = np.meshgrid(xpts, ypts)
xpts = grid_x.flatten()
ypts = grid_y.flatten()
elif style == "points":
if xpts.size != ypts.size:
raise ValueError(
"xpoints and ypoints must have "
"same dimensions when treated as "
"listing discrete points."
)
npt = nx
else:
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
if self.coordinates_type == "euclidean":
xpts, ypts = _adjust_for_anisotropy(
np.vstack((xpts, ypts)).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle],
).T
# Prepare for cdist:
xy_data = np.concatenate(
(self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1
)
xy_points = np.concatenate(
(xpts[:, np.newaxis], ypts[:, np.newaxis]), axis=1
)
elif self.coordinates_type == "geographic":
# In spherical coordinates, we do not correct for anisotropy.
# Also, we don't use scipy.spatial.cdist, so we do not have to
# format the input data accordingly.
pass
if style != "masked":
mask = np.zeros(npt, dtype="bool")
c_pars = None
if backend == "C":
try:
from .lib.cok import _c_exec_loop, _c_exec_loop_moving_window
except ImportError:
print(
"Warning: failed to load Cython extensions.\n"
" See https://github.com/GeoStat-Framework/PyKrige/issues/8 \n"
" Falling back to a pure python backend..."
)
backend = "loop"
            except Exception:
raise RuntimeError("Unknown error in trying to load Cython extension.")
c_pars = {
key: getattr(self, key)
for key in [
"Z",
"eps",
"variogram_model_parameters",
"variogram_function",
"exact_values",
"pseudo_inv",
"pseudo_inv_type",
]
}
if n_closest_points is not None:
if self.coordinates_type == "geographic":
# To make use of the KDTree, we have to convert the
# spherical coordinates into three dimensional Euclidean
# coordinates, since the standard KDTree cannot handle
# the periodicity.
# Do the conversion just for the step involving the KDTree:
lon_d = self.X_ADJUSTED[:, np.newaxis] * np.pi / 180.0
lat_d = self.Y_ADJUSTED[:, np.newaxis] * np.pi / 180.0
xy_data = np.concatenate(
(
np.cos(lon_d) * np.cos(lat_d),
np.sin(lon_d) * np.cos(lat_d),
np.sin(lat_d),
),
axis=1,
)
lon_p = xpts[:, np.newaxis] * np.pi / 180.0
lat_p = ypts[:, np.newaxis] * np.pi / 180.0
xy_points = np.concatenate(
(
np.cos(lon_p) * np.cos(lat_p),
np.sin(lon_p) * np.cos(lat_p),
np.sin(lat_p),
),
axis=1,
)
from scipy.spatial import cKDTree
tree = cKDTree(xy_data)
bd, bd_idx = tree.query(xy_points, k=n_closest_points, eps=0.0)
if self.coordinates_type == "geographic":
# Between the nearest neighbours from Euclidean search,
# calculate the great circle distance using the standard method:
x_points = np.tile(xpts[:, np.newaxis], (1, n_closest_points))
y_points = np.tile(ypts[:, np.newaxis], (1, n_closest_points))
bd = core.great_circle_distance(
x_points, y_points, self.X_ADJUSTED[bd_idx], self.Y_ADJUSTED[bd_idx]
)
if backend == "loop":
zvalues, sigmasq = self._exec_loop_moving_window(a, bd, mask, bd_idx)
elif backend == "C":
zvalues, sigmasq = _c_exec_loop_moving_window(
a,
bd,
mask.astype("int8"),
bd_idx.astype(int),
self.X_ADJUSTED.shape[0],
c_pars,
)
else:
raise ValueError(
"Specified backend {} for a moving window "
"is not supported.".format(backend)
)
else:
if self.coordinates_type == "euclidean":
bd = cdist(xy_points, xy_data, "euclidean")
elif self.coordinates_type == "geographic":
bd = core.great_circle_distance(
xpts[:, np.newaxis],
ypts[:, np.newaxis],
self.X_ADJUSTED,
self.Y_ADJUSTED,
)
if backend == "vectorized":
zvalues, sigmasq = self._exec_vector(a, bd, mask)
elif backend == "loop":
zvalues, sigmasq = self._exec_loop(a, bd, mask)
elif backend == "C":
zvalues, sigmasq = _c_exec_loop(
a, bd, mask.astype("int8"), self.X_ADJUSTED.shape[0], c_pars
)
else:
raise ValueError(
"Specified backend {} is not supported for "
"2D ordinary kriging.".format(backend)
)
if style == "masked":
zvalues = np.ma.array(zvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ["masked", "grid"]:
zvalues = zvalues.reshape((ny, nx))
sigmasq = sigmasq.reshape((ny, nx))
return zvalues, sigmasq
| 42,554 | 40.679726 | 88 | py |
PyKrige | PyKrige-main/src/pykrige/compat_gstools.py | # coding: utf-8
# pylint: disable= invalid-name, unused-import
"""For GSTools compatibility."""
# gstools
try:
import gstools as gs
GSTOOLS_INSTALLED = True
GSTOOLS_VERSION = list(map(int, gs.__version__.split(".")[:2]))
except ImportError:
gs = None
GSTOOLS_INSTALLED = False
GSTOOLS_VERSION = None
class GSToolsException(Exception):
"""Exception for GSTools."""
def validate_gstools(model):
"""Validate presence of GSTools."""
if not GSTOOLS_INSTALLED:
raise GSToolsException(
"GSTools needs to be installed in order to use their CovModel class."
)
if not isinstance(model, gs.CovModel):
raise GSToolsException(
"GSTools: given variogram model is not a CovModel instance."
)
if GSTOOLS_VERSION < [1, 3]:
raise GSToolsException("GSTools: need at least GSTools v1.3.")
if model.latlon and GSTOOLS_VERSION < [1, 4]:
raise GSToolsException(
"GSTools: latlon models in PyKrige are only supported from GSTools v1.4."
)
| 1,062 | 27.72973 | 85 | py |
PyKrige | PyKrige-main/src/pykrige/uk.py | # coding: utf-8
"""
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Contains class UniversalKriging, provides greater control over 2D kriging by
utilizing drift terms.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
Copyright (c) 2015-2020, PyKrige Developers
"""
import warnings
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
from . import core, variogram_models
from .compat_gstools import validate_gstools
from .core import (
P_INV,
_adjust_for_anisotropy,
_find_statistics,
_initialize_variogram_model,
_make_variogram_parameter_list,
)
class UniversalKriging:
"""Provides greater control over 2D kriging by utilizing drift terms.
Parameters
----------
x : array_like
X-coordinates of data points.
y : array_like
Y-coordinates of data points.
z : array_like
Values at data points.
variogram_model: str or GSTools CovModel, optional
Specified which variogram model to use; may be one of the following:
linear, power, gaussian, spherical, exponential, hole-effect.
Default is linear variogram model. To utilize a custom variogram model,
specify 'custom'; you must also provide variogram_parameters and
variogram_function. Note that the hole-effect model is only
technically correct for one-dimensional problems.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
Parameters that define the specified variogram model. If not provided,
parameters will be automatically calculated using a "soft" L1 norm
minimization scheme. For variogram model parameters provided in a dict,
the required dict keys vary according to the specified variogram
model: ::
# linear
{'slope': slope, 'nugget': nugget}
# power
{'scale': scale, 'exponent': exponent, 'nugget': nugget}
# gaussian, spherical, exponential and hole-effect:
{'sill': s, 'range': r, 'nugget': n}
# OR
{'psill': p, 'range': r, 'nugget': n}
Note that either the full sill or the partial sill
(psill = sill - nugget) can be specified in the dict.
For variogram model parameters provided in a list, the entries
must be as follows: ::
# linear
[slope, nugget]
# power
[scale, exponent, nugget]
# gaussian, spherical, exponential and hole-effect:
[sill, range, nugget]
Note that the full sill (NOT the partial sill) must be specified
in the list format.
For a custom variogram model, the parameters are required, as custom
variogram models will not automatically be fit to the data.
Furthermore, the parameters must be specified in list format, in the
order in which they are used in the callable function (see
variogram_function for more information). The code does not check
that the provided list contains the appropriate number of parameters
for the custom variogram model, so an incorrect parameter list in
such a case will probably trigger an esoteric exception someplace
deep in the code.
NOTE that, while the list format expects the full sill, the code
itself works internally with the partial sill.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. The function must take only two arguments:
first, a list of parameters for the variogram model; second,
the distances at which to calculate the variogram model. The list
provided in variogram_parameters will be passed to the function
as the first argument.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : bool, optional
Flag that specifies if semivariance at smaller lags should be weighted
more heavily when automatically calculating variogram model.
The routine is currently hard-coded such that the weights are
calculated from a logistic function, so weights at small lags are ~1
and weights at the longest lags are ~0; the center of the logistic
weighting is hard-coded to be at 70% of the distance from the shortest
lag to the largest lag. Setting this parameter to True indicates that
weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more
important in fitting a variogram model, so the option is provided
to enable such weighting.)
anisotropy_scaling : float, optional
Scalar stretching value to take into account anisotropy.
Default is 1 (effectively no stretching).
Scaling is applied in the y-direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle, if anisotropy_angle
is not 0).
anisotropy_angle : float, optional
CCW angle (in degrees) by which to rotate coordinate system in order
to take into account anisotropy. Default is 0 (no rotation).
Note that the coordinate system is rotated.
drift_terms : list of strings, optional
List of drift terms to include in universal kriging. Supported drift
terms are currently 'regional_linear', 'point_log', 'external_Z',
'specified', and 'functional'.
point_drift : array_like, optional
Array-like object that contains the coordinates and strengths of the
point-logarithmic drift terms. Array shape must be (N, 3), where N is
the number of point drift terms. First column (index 0) must contain
x-coordinates, second column (index 1) must contain y-coordinates,
and third column (index 2) must contain the strengths of each
point term. Strengths are relative, so only the relation of the values
to each other matters. Note that the code will appropriately deal with
point-logarithmic terms that are at the same coordinates as an
evaluation point or data point, but Python will still kick out a
warning message that an ln(0) has been encountered. If the problem
involves anisotropy, the well coordinates will be adjusted and the
drift values will be calculated in the adjusted data frame.
external_drift : array_like, optional
Gridded data used for the external Z scalar drift term.
Must be shape (M, N), where M is in the y-direction and N is in the
x-direction. Grid spacing does not need to be constant. If grid spacing
is not constant, must specify the grid cell sizes. If the problem
involves anisotropy, the external drift values are extracted based on
the pre-adjusted coordinates (i.e., the original coordinate system).
external_drift_x : array_like, optional
X-coordinates for gridded external Z-scalar data. Must be shape (M,)
or (M, 1), where M is the number of grid cells in the x-direction.
The coordinate is treated as the center of the cell.
external_drift_y : array_like, optional
Y-coordinates for gridded external Z-scalar data. Must be shape (N,)
or (N, 1), where N is the number of grid cells in the y-direction.
The coordinate is treated as the center of the cell.
specified_drift : list of array-like objects, optional
List of arrays that contain the drift values at data points.
The arrays must be shape (N,) or (N, 1), where N is the number of
data points. Any number of specified-drift terms may be used.
functional_drift : list of callable objects, optional
List of callable functions that will be used to evaluate drift terms.
The function must be a function of only the two spatial coordinates
and must return a single value for each coordinate pair.
It must be set up to be called with only two arguments, first an array
of x values and second an array of y values. If the problem involves
anisotropy, the drift values are calculated in the adjusted data frame.
verbose : bool, optional
Enables program text output to monitor kriging process.
Default is False (off).
enable_plotting : boolean, optional
Enables plotting to display variogram. Default is False (off).
exact_values : bool, optional
If True, interpolation provides input values at input locations.
If False, interpolation accounts for variance/nugget within input
values at input locations and does not behave as an
exact-interpolator [2]. Note that this only has an effect if
there is variance/nugget present within the input data since it is
interpreted as measurement error. If the nugget is zero, the kriged
field will behave as an exact interpolator.
pseudo_inv : :class:`bool`, optional
Whether the kriging system is solved with the pseudo inverted
kriging matrix. If `True`, this leads to more numerical stability
and redundant points are averaged. But it can take more time.
Default: False
pseudo_inv_type : :class:`str`, optional
Here you can select the algorithm to compute the pseudo-inverse matrix:
* `"pinv"`: use `pinv` from `scipy` which uses `lstsq`
* `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values
Default: `"pinv"`
References
----------
    .. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
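
    Examples
    --------
    A minimal sketch with a regional linear drift term (illustrative values
    only)::

        import numpy as np
        from pykrige.uk import UniversalKriging

        data = np.array([[0.3, 1.2, 0.47], [1.9, 0.6, 0.56], [1.1, 3.2, 0.74],
                         [3.3, 4.4, 1.47], [4.7, 3.8, 1.74]])
        uk = UniversalKriging(
            data[:, 0], data[:, 1], data[:, 2],
            variogram_model="linear",
            drift_terms=["regional_linear"],
        )
        z, ss = uk.execute("grid", np.arange(0.0, 5.5, 0.5),
                           np.arange(0.0, 5.5, 0.5))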
"""
UNBIAS = True # This can be changed to remove the unbiasedness condition
# Really for testing purposes only...
eps = 1.0e-10 # Cutoff for comparison to zero
variogram_dict = {
"linear": variogram_models.linear_variogram_model,
"power": variogram_models.power_variogram_model,
"gaussian": variogram_models.gaussian_variogram_model,
"spherical": variogram_models.spherical_variogram_model,
"exponential": variogram_models.exponential_variogram_model,
"hole-effect": variogram_models.hole_effect_variogram_model,
}
def __init__(
self,
x,
y,
z,
variogram_model="linear",
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling=1.0,
anisotropy_angle=0.0,
drift_terms=None,
point_drift=None,
external_drift=None,
external_drift_x=None,
external_drift_y=None,
specified_drift=None,
functional_drift=None,
verbose=False,
enable_plotting=False,
exact_values=True,
pseudo_inv=False,
pseudo_inv_type="pinv",
):
# config the pseudo inverse
self.pseudo_inv = bool(pseudo_inv)
self.pseudo_inv_type = str(pseudo_inv_type)
if self.pseudo_inv_type not in P_INV:
raise ValueError("pseudo inv type not valid: " + str(pseudo_inv_type))
# Deal with mutable default argument
if drift_terms is None:
drift_terms = []
if specified_drift is None:
specified_drift = []
if functional_drift is None:
functional_drift = []
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
if not isinstance(exact_values, bool):
raise ValueError("exact_values has to be boolean True or False")
self.exact_values = exact_values
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
validate_gstools(self.model)
if self.model.field_dim == 3:
raise ValueError("GSTools: model dim is not 1 or 2")
if self.model.latlon:
raise ValueError(
"GSTools: latlon models not supported for universal kriging"
)
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling = self.model.pykrige_anis
anisotropy_angle = self.model.pykrige_angle
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = np.atleast_1d(
np.squeeze(np.array(x, copy=True, dtype=np.float64))
)
self.Y_ORIG = np.atleast_1d(
np.squeeze(np.array(y, copy=True, dtype=np.float64))
)
self.Z = np.atleast_1d(np.squeeze(np.array(z, copy=True, dtype=np.float64)))
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print("Plotting Enabled\n")
# adjust for anisotropy...
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG)) / 2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG)) / 2.0
self.anisotropy_scaling = anisotropy_scaling
self.anisotropy_angle = anisotropy_angle
if self.verbose:
print("Adjusting data for anisotropy...")
self.X_ADJUSTED, self.Y_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG)).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle],
).T
if self.verbose:
print("Initializing variogram model...")
# see comment in ok.py about 'use_psill' kwarg...
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T,
self.Z,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
"euclidean",
)
# TODO extend geographic capabilities to UK...
if self.verbose:
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T,
self.Z,
self.variogram_function,
self.variogram_model_parameters,
"euclidean",
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
if self.verbose:
print("Initializing drift terms...")
# Note that the regional linear drift values will be based
        # on the adjusted coordinate system. Really, it doesn't actually
# matter which coordinate system is used here.
if "regional_linear" in drift_terms:
self.regional_linear_drift = True
if self.verbose:
print("Implementing regional linear drift.")
else:
self.regional_linear_drift = False
# External Z scalars are extracted using the original
# (unadjusted) coordinates.
if "external_Z" in drift_terms:
if external_drift is None:
raise ValueError("Must specify external Z drift terms.")
if external_drift_x is None or external_drift_y is None:
raise ValueError("Must specify coordinates of external Z drift terms.")
self.external_Z_drift = True
if (
external_drift.shape[0] != external_drift_y.shape[0]
or external_drift.shape[1] != external_drift_x.shape[0]
):
if (
external_drift.shape[0] == external_drift_x.shape[0]
and external_drift.shape[1] == external_drift_y.shape[0]
):
self.external_Z_array = np.array(external_drift.T)
else:
raise ValueError(
"External drift dimensions do not match "
"provided x- and y-coordinate dimensions."
)
else:
self.external_Z_array = np.array(external_drift)
self.external_Z_array_x = np.array(external_drift_x).flatten()
self.external_Z_array_y = np.array(external_drift_y).flatten()
self.z_scalars = self._calculate_data_point_zscalars(
self.X_ORIG, self.Y_ORIG
)
if self.verbose:
print("Implementing external Z drift.")
else:
self.external_Z_drift = False
# Well coordinates are rotated into adjusted coordinate frame.
if "point_log" in drift_terms:
if point_drift is None:
raise ValueError(
"Must specify location(s) and strength(s) of point drift terms."
)
self.point_log_drift = True
point_log = np.atleast_2d(np.squeeze(np.array(point_drift, copy=True)))
self.point_log_array = np.zeros(point_log.shape)
self.point_log_array[:, 2] = point_log[:, 2]
self.point_log_array[:, :2] = _adjust_for_anisotropy(
np.vstack((point_log[:, 0], point_log[:, 1])).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle],
)
if self.verbose:
print(
"Implementing external point-logarithmic drift; "
"number of points =",
self.point_log_array.shape[0],
"\n",
)
else:
self.point_log_drift = False
if "specified" in drift_terms:
if type(specified_drift) is not list:
raise TypeError(
"Arrays for specified drift terms must be "
"encapsulated in a list."
)
if len(specified_drift) == 0:
raise ValueError(
"Must provide at least one drift-value array "
"when using the 'specified' drift capability."
)
self.specified_drift = True
self.specified_drift_data_arrays = []
for term in specified_drift:
specified = np.squeeze(np.array(term, copy=True))
if specified.size != self.X_ORIG.size:
raise ValueError(
"Must specify the drift values for each "
"data point when using the 'specified' "
"drift capability."
)
self.specified_drift_data_arrays.append(specified)
else:
self.specified_drift = False
# The provided callable functions will be evaluated using
# the adjusted coordinates.
if "functional" in drift_terms:
if type(functional_drift) is not list:
raise TypeError(
"Callables for functional drift terms must "
"be encapsulated in a list."
)
if len(functional_drift) == 0:
raise ValueError(
"Must provide at least one callable object "
"when using the 'functional' drift capability."
)
self.functional_drift = True
self.functional_drift_terms = functional_drift
else:
self.functional_drift = False
def _calculate_data_point_zscalars(self, x, y, type_="array"):
"""Determines the Z-scalar values at the specified coordinates
for use when setting up the kriging matrix. Uses bilinear
interpolation.
Currently, the Z scalar values are extracted from the input Z grid
exactly at the specified coordinates. This means that if the Z grid
resolution is finer than the resolution of the desired kriged grid,
there is no averaging of the scalar values to return an average
Z value for that cell in the kriged grid. Rather, the exact Z value
right at the coordinate is used."""
if type_ == "scalar":
nx = 1
ny = 1
z_scalars = None
else:
if x.ndim == 1:
nx = x.shape[0]
ny = 1
else:
ny = x.shape[0]
nx = x.shape[1]
z_scalars = np.zeros(x.shape)
for m in range(ny):
for n in range(nx):
if type_ == "scalar":
xn = x
yn = y
else:
if x.ndim == 1:
xn = x[n]
yn = y[n]
else:
xn = x[m, n]
yn = y[m, n]
if (
xn > np.amax(self.external_Z_array_x)
or xn < np.amin(self.external_Z_array_x)
or yn > np.amax(self.external_Z_array_y)
or yn < np.amin(self.external_Z_array_y)
):
raise ValueError(
"External drift array does not cover "
"specified kriging domain."
)
# bilinear interpolation
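                # Locate the four grid nodes (x1, x2) x (y1, y2) that bracket
                # the point; the value is the area-weighted average of the node
                # values, with degenerate 1D/0D cases handled separately below.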
external_x2_index = np.amin(np.where(self.external_Z_array_x >= xn)[0])
external_x1_index = np.amax(np.where(self.external_Z_array_x <= xn)[0])
external_y2_index = np.amin(np.where(self.external_Z_array_y >= yn)[0])
external_y1_index = np.amax(np.where(self.external_Z_array_y <= yn)[0])
if external_y1_index == external_y2_index:
if external_x1_index == external_x2_index:
z = self.external_Z_array[external_y1_index, external_x1_index]
else:
z = (
self.external_Z_array[external_y1_index, external_x1_index]
* (self.external_Z_array_x[external_x2_index] - xn)
+ self.external_Z_array[
external_y2_index, external_x2_index
]
* (xn - self.external_Z_array_x[external_x1_index])
) / (
self.external_Z_array_x[external_x2_index]
- self.external_Z_array_x[external_x1_index]
)
elif external_x1_index == external_x2_index:
if external_y1_index == external_y2_index:
z = self.external_Z_array[external_y1_index, external_x1_index]
else:
z = (
self.external_Z_array[external_y1_index, external_x1_index]
* (self.external_Z_array_y[external_y2_index] - yn)
+ self.external_Z_array[
external_y2_index, external_x2_index
]
* (yn - self.external_Z_array_y[external_y1_index])
) / (
self.external_Z_array_y[external_y2_index]
- self.external_Z_array_y[external_y1_index]
)
else:
z = (
self.external_Z_array[external_y1_index, external_x1_index]
* (self.external_Z_array_x[external_x2_index] - xn)
* (self.external_Z_array_y[external_y2_index] - yn)
+ self.external_Z_array[external_y1_index, external_x2_index]
* (xn - self.external_Z_array_x[external_x1_index])
* (self.external_Z_array_y[external_y2_index] - yn)
+ self.external_Z_array[external_y2_index, external_x1_index]
* (self.external_Z_array_x[external_x2_index] - xn)
* (yn - self.external_Z_array_y[external_y1_index])
+ self.external_Z_array[external_y2_index, external_x2_index]
* (xn - self.external_Z_array_x[external_x1_index])
* (yn - self.external_Z_array_y[external_y1_index])
) / (
(
self.external_Z_array_x[external_x2_index]
- self.external_Z_array_x[external_x1_index]
)
* (
self.external_Z_array_y[external_y2_index]
- self.external_Z_array_y[external_y1_index]
)
)
if type_ == "scalar":
z_scalars = z
else:
if z_scalars.ndim == 1:
z_scalars[n] = z
else:
z_scalars[m, n] = z
return z_scalars
def update_variogram_model(
self,
variogram_model,
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling=1.0,
anisotropy_angle=0.0,
):
"""Allows user to update variogram type and/or
variogram model parameters.
Parameters
----------
variogram_model : str or GSTools CovModel
May be any of the variogram models listed above.
May also be 'custom', in which case variogram_parameters and
variogram_function must be specified.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
List or dict of variogram model parameters, as explained above.
If not provided, a best fit model will be calculated as
described above.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. See above for more information.
nlags : int, optional
            Number of averaging bins for the semivariogram. Default is 6.
weight : boolean, optional
Flag that specifies if semivariance at smaller lags should be
weighted more heavily when automatically calculating the
variogram model. See above for more information. True indicates
that weights will be applied. Default is False.
anisotropy_scaling : float, optional
Scalar stretching value to take into account anisotropy.
Default is 1 (effectively no stretching).
Scaling is applied in the y-direction.
anisotropy_angle : float, optional
CCW angle (in degrees) by which to rotate coordinate system in
order to take into account anisotropy. Default is 0 (no rotation).
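
        Examples
        --------
        A sketch of switching a hypothetical ``uk`` object to another model
        (the commented variant assumes the optional ``gstools`` package)::

            uk.update_variogram_model("exponential", nlags=8)
            # or, with a GSTools covariance model:
            # import gstools as gs
            # uk.update_variogram_model(gs.Gaussian(dim=2, var=0.5, len_scale=2.0))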
"""
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
validate_gstools(self.model)
if self.model.field_dim == 3:
raise ValueError("GSTools: model dim is not 1 or 2")
if self.model.latlon:
raise ValueError(
"GSTools: latlon models not supported for universal kriging"
)
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling = self.model.pykrige_anis
anisotropy_angle = self.model.pykrige_angle
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if (
anisotropy_scaling != self.anisotropy_scaling
or anisotropy_angle != self.anisotropy_angle
):
if self.verbose:
print("Adjusting data for anisotropy...")
self.anisotropy_scaling = anisotropy_scaling
self.anisotropy_angle = anisotropy_angle
self.X_ADJUSTED, self.Y_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG)).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle],
).T
if self.verbose:
print("Updating variogram mode...")
# See note above about the 'use_psill' kwarg...
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T,
self.Z,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
"euclidean",
)
if self.verbose:
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED)).T,
self.Z,
self.variogram_function,
self.variogram_model_parameters,
"euclidean",
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, "r*")
ax.plot(
self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags),
"k-",
)
plt.show()
def get_variogram_points(self):
"""Returns both the lags and the variogram function evaluated at each
of them.
The evaluation of the variogram function and the lags are produced
        internally. This method is convenient when the user wants access to
the lags and the resulting variogram (according to the model provided)
for further analysis.
Returns
-------
(tuple) tuple containing:
lags (array) - the lags at which the variogram was evaluated
variogram (array) - the variogram function evaluated at the lags
"""
return (
self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags),
)
def switch_verbose(self):
"""Allows user to switch code talk-back on/off. Takes no arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Allows user to switch plot display on/off. Takes no arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c="k", marker="*")
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
"""Returns the Q1, Q2, and cR statistics for the variogram fit
(in that order). No arguments.
"""
return self.Q1, self.Q2, self.cR
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
def _get_kriging_matrix(self, n, n_withdrifts):
"""Assembles the kriging matrix."""
xy = np.concatenate(
(self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1
)
d = cdist(xy, xy, "euclidean")
if self.UNBIAS:
a = np.zeros((n_withdrifts + 1, n_withdrifts + 1))
else:
a = np.zeros((n_withdrifts, n_withdrifts))
a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
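        # Each active drift term appends one extra row/column holding the
        # drift values at the data points; with UNBIAS, a final row/column of
        # ones adds the usual unbiasedness constraint.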
i = n
if self.regional_linear_drift:
a[:n, i] = self.X_ADJUSTED
a[i, :n] = self.X_ADJUSTED
i += 1
a[:n, i] = self.Y_ADJUSTED
a[i, :n] = self.Y_ADJUSTED
i += 1
if self.point_log_drift:
for well_no in range(self.point_log_array.shape[0]):
log_dist = np.log(
np.sqrt(
(self.X_ADJUSTED - self.point_log_array[well_no, 0]) ** 2
+ (self.Y_ADJUSTED - self.point_log_array[well_no, 1]) ** 2
)
)
if np.any(np.isinf(log_dist)):
log_dist[np.isinf(log_dist)] = -100.0
a[:n, i] = -self.point_log_array[well_no, 2] * log_dist
a[i, :n] = -self.point_log_array[well_no, 2] * log_dist
i += 1
if self.external_Z_drift:
a[:n, i] = self.z_scalars
a[i, :n] = self.z_scalars
i += 1
if self.specified_drift:
for arr in self.specified_drift_data_arrays:
a[:n, i] = arr
a[i, :n] = arr
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
a[:n, i] = func(self.X_ADJUSTED, self.Y_ADJUSTED)
a[i, :n] = func(self.X_ADJUSTED, self.Y_ADJUSTED)
i += 1
if i != n_withdrifts:
warnings.warn(
"Error in creating kriging matrix. Kriging may fail.", RuntimeWarning
)
if self.UNBIAS:
a[n_withdrifts, :n] = 1.0
a[:n, n_withdrifts] = 1.0
a[n : n_withdrifts + 1, n : n_withdrifts + 1] = 0.0
return a
def _exec_vector(self, a, bd, xy, xy_orig, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
if self.UNBIAS:
b = np.zeros((npt, n_withdrifts + 1, 1))
else:
b = np.zeros((npt, n_withdrifts, 1))
b[:, :n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], zero_index[1], 0] = 0.0
i = n
if self.regional_linear_drift:
b[:, i, 0] = xy[:, 0]
i += 1
b[:, i, 0] = xy[:, 1]
i += 1
if self.point_log_drift:
for well_no in range(self.point_log_array.shape[0]):
log_dist = np.log(
np.sqrt(
(xy[:, 0] - self.point_log_array[well_no, 0]) ** 2
+ (xy[:, 1] - self.point_log_array[well_no, 1]) ** 2
)
)
if np.any(np.isinf(log_dist)):
log_dist[np.isinf(log_dist)] = -100.0
b[:, i, 0] = -self.point_log_array[well_no, 2] * log_dist
i += 1
if self.external_Z_drift:
b[:, i, 0] = self._calculate_data_point_zscalars(
xy_orig[:, 0], xy_orig[:, 1]
)
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
b[:, i, 0] = spec_vals.flatten()
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[:, i, 0] = func(xy[:, 0], xy[:, 1])
i += 1
if i != n_withdrifts:
warnings.warn(
"Error in setting up kriging system. Kriging may fail.",
RuntimeWarning,
)
if self.UNBIAS:
b[:, n_withdrifts, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(
mask[:, np.newaxis, np.newaxis], n_withdrifts + 1, axis=1
)
b = np.ma.array(b, mask=mask_b)
if self.UNBIAS:
x = (
np.dot(a_inv, b.reshape((npt, n_withdrifts + 1)).T)
.reshape((1, n_withdrifts + 1, npt))
.T
)
else:
x = (
np.dot(a_inv, b.reshape((npt, n_withdrifts)).T)
.reshape((1, n_withdrifts, npt))
.T
)
zvalues = np.sum(x[:, :n, 0] * self.Z, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return zvalues, sigmasq
def _exec_loop(self, a, bd_all, xy, xy_orig, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
zvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[
0
]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_index = None
zero_value = False
if self.UNBIAS:
b = np.zeros((n_withdrifts + 1, 1))
else:
b = np.zeros((n_withdrifts, 1))
b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], 0] = 0.0
i = n
if self.regional_linear_drift:
b[i, 0] = xy[j, 0]
i += 1
b[i, 0] = xy[j, 1]
i += 1
if self.point_log_drift:
for well_no in range(self.point_log_array.shape[0]):
log_dist = np.log(
np.sqrt(
(xy[j, 0] - self.point_log_array[well_no, 0]) ** 2
+ (xy[j, 1] - self.point_log_array[well_no, 1]) ** 2
)
)
if np.any(np.isinf(log_dist)):
log_dist[np.isinf(log_dist)] = -100.0
b[i, 0] = -self.point_log_array[well_no, 2] * log_dist
i += 1
if self.external_Z_drift:
b[i, 0] = self._calculate_data_point_zscalars(
xy_orig[j, 0], xy_orig[j, 1], type_="scalar"
)
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
                    b[i, 0] = spec_vals.flatten()[j]  # drift value at query point j
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[i, 0] = func(xy[j, 0], xy[j, 1])
i += 1
if i != n_withdrifts:
warnings.warn(
"Error in setting up kriging system. Kriging may fail.",
RuntimeWarning,
)
if self.UNBIAS:
b[n_withdrifts, 0] = 1.0
x = np.dot(a_inv, b)
zvalues[j] = np.sum(x[:n, 0] * self.Z)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return zvalues, sigmasq
def execute(
self,
style,
xpoints,
ypoints,
mask=None,
backend="vectorized",
specified_drift_arrays=None,
):
"""Calculates a kriged grid and the associated variance.
Includes drift terms.
Parameters
----------
style : str
Specifies how to treat input kriging points. Specifying 'grid'
treats xpoints and ypoints as two arrays of x and y coordinates
that define a rectangular grid. Specifying 'points' treats xpoints
and ypoints as two arrays that provide coordinate pairs at which
to solve the kriging system. Specifying 'masked' treats xpoints and
ypoints as two arrays of x and y coordinates that define a
rectangular grid and uses mask to only evaluate specific points
in the grid.
xpoints : array_like, shape (N,) or (N, 1)
            If style is specified as 'grid' or 'masked', x-coordinates of
MxN grid. If style is specified as 'points', x-coordinates of
specific points at which to solve kriging system.
ypoints : array-like, shape (M,) or (M, 1)
If style is specified as 'grid' or 'masked', y-coordinates of
MxN grid. If style is specified as 'points', y-coordinates of
specific points at which to solve kriging system.
Note that in this case, xpoints and ypoints must have the same
dimensions (i.e., M = N).
mask : boolean array, shape (M, N), optional
Specifies the points in the rectangular grid defined by xpoints and
ypoints that are to be excluded in the kriging calculations.
Must be provided if style is specified as 'masked'. False indicates
that the point should not be masked, so the kriging system will be
solved at the point. True indicates that the point should be masked,
            so the kriging system will not be solved at the point.
backend : str, optional
Specifies which approach to use in kriging. Specifying 'vectorized'
will solve the entire kriging problem at once in a vectorized
operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging
system is to be solved. This approach is slower but also less
memory-intensive. Default is 'vectorized'.
Note that Cython backend is not supported for UK.
specified_drift_arrays : list of array-like objects, optional
Specifies the drift values at the points at which the kriging
system is to be evaluated. Required if 'specified' drift provided
in the list of drift terms when instantiating the UniversalKriging
class. Must be a list of arrays in the same order as the list
provided when instantiating the kriging object. Array(s) must be
the same dimension as the specified grid or have the same number of
points as the specified points; i.e., the arrays either must be
shape (M, N), where M is the number of y grid-points and N is the
            number of x grid-points, or shape (M, ) or (M, 1), where M is the
number of points at which to evaluate the kriging system.
Returns
-------
zvalues : ndarray, shape (M, N) or (N, 1)
Z-values of specified grid or at the specified set of points.
If style was specified as 'masked', zvalues will be a numpy
masked array.
sigmasq : ndarray, shape (M, N) or (N, 1)
Variance at specified grid points or at the specified set of points.
If style was specified as 'masked', sigmasq will be a numpy
masked array.
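        Examples
        --------
        A minimal, illustrative sketch; the data values and grid spacing
        below are arbitrary and are not reference results::

            import numpy as np
            from pykrige.uk import UniversalKriging

            x = np.array([0.3, 1.9, 1.1, 3.3, 4.7])
            y = np.array([1.2, 0.6, 3.2, 4.4, 3.8])
            z = np.array([0.47, 0.56, 0.74, 1.47, 1.74])
            uk = UniversalKriging(
                x, y, z,
                variogram_model="linear",
                drift_terms=["regional_linear"],
            )
            gridx = np.arange(0.0, 5.5, 0.5)
            gridy = np.arange(0.0, 5.5, 0.5)
            zvalues, sigmasq = uk.execute("grid", gridx, gridy)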
"""
if self.verbose:
print("Executing Universal Kriging...\n")
if style != "grid" and style != "masked" and style != "points":
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
n = self.X_ADJUSTED.shape[0]
n_withdrifts = n
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
nx = xpts.size
ny = ypts.size
if self.regional_linear_drift:
n_withdrifts += 2
if self.point_log_drift:
n_withdrifts += self.point_log_array.shape[0]
if self.external_Z_drift:
n_withdrifts += 1
if self.specified_drift:
n_withdrifts += len(self.specified_drift_data_arrays)
if self.functional_drift:
n_withdrifts += len(self.functional_drift_terms)
a = self._get_kriging_matrix(n, n_withdrifts)
if style in ["grid", "masked"]:
if style == "masked":
if mask is None:
raise IOError(
"Must specify boolean masking array when style is 'masked'."
)
if mask.shape[0] != ny or mask.shape[1] != nx:
if mask.shape[0] == nx and mask.shape[1] == ny:
mask = mask.T
else:
raise ValueError(
"Mask dimensions do not match specified grid dimensions."
)
mask = mask.flatten()
npt = ny * nx
grid_x, grid_y = np.meshgrid(xpts, ypts)
xpts = grid_x.flatten()
ypts = grid_y.flatten()
elif style == "points":
if xpts.size != ypts.size:
raise ValueError(
"xpoints and ypoints must have same "
"dimensions when treated as listing "
"discrete points."
)
npt = nx
else:
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
if specified_drift_arrays is None:
specified_drift_arrays = []
spec_drift_grids = []
if self.specified_drift:
if len(specified_drift_arrays) == 0:
raise ValueError(
"Must provide drift values for kriging points "
"when using 'specified' drift capability."
)
if type(specified_drift_arrays) is not list:
raise TypeError(
"Arrays for specified drift terms must be "
"encapsulated in a list."
)
for spec in specified_drift_arrays:
if style in ["grid", "masked"]:
if spec.ndim < 2:
raise ValueError(
"Dimensions of drift values array do "
"not match specified grid dimensions."
)
elif spec.shape[0] != ny or spec.shape[1] != nx:
if spec.shape[0] == nx and spec.shape[1] == ny:
spec_drift_grids.append(np.squeeze(spec.T))
else:
raise ValueError(
"Dimensions of drift values array "
"do not match specified grid "
"dimensions."
)
else:
spec_drift_grids.append(np.squeeze(spec))
elif style == "points":
if spec.ndim != 1:
raise ValueError(
"Dimensions of drift values array do "
"not match specified grid dimensions."
)
elif spec.shape[0] != xpts.size:
raise ValueError(
"Number of supplied drift values in "
"array do not match specified number "
"of kriging points."
)
else:
spec_drift_grids.append(np.squeeze(spec))
if len(spec_drift_grids) != len(self.specified_drift_data_arrays):
raise ValueError(
"Inconsistent number of specified drift terms supplied."
)
else:
if len(specified_drift_arrays) != 0:
warnings.warn(
"Provided specified drift values, but "
"'specified' drift was not initialized during "
"instantiation of UniversalKriging class.",
RuntimeWarning,
)
xy_points_original = np.concatenate(
(xpts[:, np.newaxis], ypts[:, np.newaxis]), axis=1
)
xpts, ypts = _adjust_for_anisotropy(
np.vstack((xpts, ypts)).T,
[self.XCENTER, self.YCENTER],
[self.anisotropy_scaling],
[self.anisotropy_angle],
).T
xy_points = np.concatenate((xpts[:, np.newaxis], ypts[:, np.newaxis]), axis=1)
xy_data = np.concatenate(
(self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis]), axis=1
)
if style != "masked":
mask = np.zeros(npt, dtype="bool")
bd = cdist(xy_points, xy_data, "euclidean")
if backend == "vectorized":
zvalues, sigmasq = self._exec_vector(
a,
bd,
xy_points,
xy_points_original,
mask,
n_withdrifts,
spec_drift_grids,
)
elif backend == "loop":
zvalues, sigmasq = self._exec_loop(
a,
bd,
xy_points,
xy_points_original,
mask,
n_withdrifts,
spec_drift_grids,
)
else:
raise ValueError(
"Specified backend {} is not supported "
"for 2D universal kriging.".format(backend)
)
if style == "masked":
zvalues = np.ma.array(zvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ["masked", "grid"]:
zvalues = zvalues.reshape((ny, nx))
sigmasq = sigmasq.reshape((ny, nx))
return zvalues, sigmasq
| 56,799 | 41.706767 | 87 | py |
PyKrige | PyKrige-main/src/pykrige/core.py | # coding: utf-8
"""
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Methods used by multiple classes.
References
----------
[1] P.K. Kitanidis, Introduction to Geostatistics: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
[2] T. Vincenty, Direct and Inverse Solutions of Geodesics on the Ellipsoid
with Application of Nested Equations, Survey Review 23 (176),
(Directorate of Overseas Survey, Kingston Road, Tolworth, Surrey 1975)
Copyright (c) 2015-2020, PyKrige Developers
"""
import numpy as np
import scipy.linalg as spl
from scipy.optimize import least_squares
from scipy.spatial.distance import cdist, pdist, squareform
eps = 1.0e-10 # Cutoff for comparison to zero
P_INV = {"pinv": spl.pinv, "pinvh": spl.pinvh}
def great_circle_distance(lon1, lat1, lon2, lat2):
"""Calculate the great circle distance between one or multiple pairs of
points given in spherical coordinates. Spherical coordinates are expected
in degrees. Angle definition follows standard longitude/latitude definition.
This uses the arctan version of the great-circle distance function
(en.wikipedia.org/wiki/Great-circle_distance) for increased
numerical stability.
Parameters
----------
lon1: float scalar or numpy array
Longitude coordinate(s) of the first element(s) of the point
pair(s), given in degrees.
lat1: float scalar or numpy array
Latitude coordinate(s) of the first element(s) of the point
pair(s), given in degrees.
lon2: float scalar or numpy array
Longitude coordinate(s) of the second element(s) of the point
pair(s), given in degrees.
lat2: float scalar or numpy array
Latitude coordinate(s) of the second element(s) of the point
pair(s), given in degrees.
Calculation of distances follows numpy elementwise semantics, so if
an array of length N is passed, all input parameters need to be
arrays of length N or scalars.
Returns
-------
distance: float scalar or numpy array
The great circle distance(s) (in degrees) between the
given pair(s) of points.
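    Examples
    --------
    A small illustrative check (arbitrary values): two points on the equator
    separated by 90 degrees of longitude are a quarter of a great circle
    apart::

        d = great_circle_distance(0.0, 0.0, 90.0, 0.0)
        # d is approximately 90.0 (degrees)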
"""
# Convert to radians:
lat1 = np.array(lat1) * np.pi / 180.0
lat2 = np.array(lat2) * np.pi / 180.0
dlon = (lon1 - lon2) * np.pi / 180.0
# Evaluate trigonometric functions that need to be evaluated more
# than once:
c1 = np.cos(lat1)
s1 = np.sin(lat1)
c2 = np.cos(lat2)
s2 = np.sin(lat2)
cd = np.cos(dlon)
# This uses the arctan version of the great-circle distance function
# from en.wikipedia.org/wiki/Great-circle_distance for increased
# numerical stability.
# Formula can be obtained from [2] combining eqns. (14)-(16)
# for spherical geometry (f=0).
return (
180.0
/ np.pi
* np.arctan2(
np.sqrt((c2 * np.sin(dlon)) ** 2 + (c1 * s2 - s1 * c2 * cd) ** 2),
s1 * s2 + c1 * c2 * cd,
)
)
def euclid3_to_great_circle(euclid3_distance):
"""Convert euclidean distance between points on a unit sphere to
the corresponding great circle distance.
Parameters
----------
euclid3_distance: float scalar or numpy array
The euclidean three-space distance(s) between points on a
unit sphere, thus between [0,2].
Returns
-------
great_circle_dist: float scalar or numpy array
The corresponding great circle distance(s) between the points.
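    Examples
    --------
    Illustrative values: a chord of length sqrt(2) on the unit sphere
    subtends a 90 degree arc and a chord of length 2 (antipodal points)
    subtends 180 degrees. Note that the in-place clipping below requires an
    array input::

        import numpy as np
        arcs = euclid3_to_great_circle(np.array([np.sqrt(2.0), 2.0]))
        # arcs is approximately [90.0, 180.0] (degrees)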
"""
# Eliminate some possible numerical errors:
euclid3_distance[euclid3_distance > 2.0] = 2.0
return 180.0 - 360.0 / np.pi * np.arccos(0.5 * euclid3_distance)
def _adjust_for_anisotropy(X, center, scaling, angle):
"""Adjusts data coordinates to take into account anisotropy.
Can also be used to take into account data scaling. Angles are CCW about
specified axes. Scaling is applied in rotated coordinate system.
Parameters
----------
X : ndarray
float array [n_samples, n_dim], the input array of coordinates
center : ndarray
float array [n_dim], the coordinate of centers
scaling : ndarray
float array [n_dim - 1], the scaling of last two dimensions
angle : ndarray
float array [2*n_dim - 3], the anisotropy angle (degrees)
Returns
-------
X_adj : ndarray
float array [n_samples, n_dim], the X array adjusted for anisotropy.
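    Examples
    --------
    A 2D illustrative sketch with arbitrary values: with no scaling and a
    90 degree angle, the data are rotated clockwise by 90 degrees about the
    given center before the (here trivial) stretching is applied::

        import numpy as np
        pts = np.array([[1.0, 0.0]])
        out = _adjust_for_anisotropy(pts, [0.0, 0.0], [1.0], [90.0])
        # out is approximately [[0.0, -1.0]]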
"""
center = np.asarray(center)[None, :]
angle = np.asarray(angle) * np.pi / 180
X -= center
Ndim = X.shape[1]
if Ndim == 1:
        raise NotImplementedError("Anisotropy adjustment is not implemented for 1D input.")
elif Ndim == 2:
stretch = np.array([[1, 0], [0, scaling[0]]])
rot_tot = np.array(
[
[np.cos(-angle[0]), -np.sin(-angle[0])],
[np.sin(-angle[0]), np.cos(-angle[0])],
]
)
elif Ndim == 3:
stretch = np.array(
[[1.0, 0.0, 0.0], [0.0, scaling[0], 0.0], [0.0, 0.0, scaling[1]]]
)
rotate_x = np.array(
[
[1.0, 0.0, 0.0],
[0.0, np.cos(-angle[0]), -np.sin(-angle[0])],
[0.0, np.sin(-angle[0]), np.cos(-angle[0])],
]
)
rotate_y = np.array(
[
[np.cos(-angle[1]), 0.0, np.sin(-angle[1])],
[0.0, 1.0, 0.0],
[-np.sin(-angle[1]), 0.0, np.cos(-angle[1])],
]
)
rotate_z = np.array(
[
[np.cos(-angle[2]), -np.sin(-angle[2]), 0.0],
[np.sin(-angle[2]), np.cos(-angle[2]), 0.0],
[0.0, 0.0, 1.0],
]
)
rot_tot = np.dot(rotate_z, np.dot(rotate_y, rotate_x))
else:
raise ValueError(
"Adjust for anisotropy function doesn't support ND spaces where N>3"
)
X_adj = np.dot(stretch, np.dot(rot_tot, X.T)).T
X_adj += center
return X_adj
def _make_variogram_parameter_list(variogram_model, variogram_model_parameters):
"""Converts the user input for the variogram model parameters into the
format expected in the rest of the code.
Makes a list of variogram model parameters in the expected order if the
user has provided the model parameters. If not, returns None, which
will ensure that the automatic variogram estimation routine is
triggered.
Parameters
----------
variogram_model : str
specifies the variogram model type
variogram_model_parameters : list, dict, or None
parameters provided by the user, can also be None if the user
did not specify the variogram model parameters; if None,
this function returns None, that way the automatic variogram
estimation routine will kick in down the road...
Returns
-------
parameter_list : list
variogram model parameters stored in a list in the expected order;
if variogram_model is 'custom', model parameters should already
be encapsulated in a list, so the list is returned unaltered;
if variogram_model_parameters was not specified by the user,
None is returned; order for internal variogram models is as follows...
linear - [slope, nugget]
power - [scale, exponent, nugget]
gaussian - [psill, range, nugget]
spherical - [psill, range, nugget]
exponential - [psill, range, nugget]
hole-effect - [psill, range, nugget]
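    Examples
    --------
    Illustrative conversion (arbitrary values): a 'spherical' model given as
    a dict with the full sill is returned as [psill, range, nugget], where
    psill = sill - nugget::

        params = _make_variogram_parameter_list(
            "spherical", {"sill": 1.25, "range": 10.0, "nugget": 0.25}
        )
        # params == [1.0, 10.0, 0.25]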
"""
if variogram_model_parameters is None:
parameter_list = None
elif type(variogram_model_parameters) is dict:
if variogram_model in ["linear"]:
if (
"slope" not in variogram_model_parameters.keys()
or "nugget" not in variogram_model_parameters.keys()
):
raise KeyError(
"'linear' variogram model requires 'slope' "
"and 'nugget' specified in variogram model "
"parameter dictionary."
)
else:
parameter_list = [
variogram_model_parameters["slope"],
variogram_model_parameters["nugget"],
]
elif variogram_model in ["power"]:
if (
"scale" not in variogram_model_parameters.keys()
or "exponent" not in variogram_model_parameters.keys()
or "nugget" not in variogram_model_parameters.keys()
):
raise KeyError(
"'power' variogram model requires 'scale', "
"'exponent', and 'nugget' specified in "
"variogram model parameter dictionary."
)
else:
parameter_list = [
variogram_model_parameters["scale"],
variogram_model_parameters["exponent"],
variogram_model_parameters["nugget"],
]
elif variogram_model in ["gaussian", "spherical", "exponential", "hole-effect"]:
if (
"range" not in variogram_model_parameters.keys()
or "nugget" not in variogram_model_parameters.keys()
):
raise KeyError(
"'%s' variogram model requires 'range', "
"'nugget', and either 'sill' or 'psill' "
"specified in variogram model parameter "
"dictionary." % variogram_model
)
else:
if "sill" in variogram_model_parameters.keys():
parameter_list = [
variogram_model_parameters["sill"]
- variogram_model_parameters["nugget"],
variogram_model_parameters["range"],
variogram_model_parameters["nugget"],
]
elif "psill" in variogram_model_parameters.keys():
parameter_list = [
variogram_model_parameters["psill"],
variogram_model_parameters["range"],
variogram_model_parameters["nugget"],
]
else:
raise KeyError(
"'%s' variogram model requires either "
"'sill' or 'psill' specified in "
"variogram model parameter "
"dictionary." % variogram_model
)
elif variogram_model in ["custom"]:
raise TypeError(
"For user-specified custom variogram model, "
"parameters must be specified in a list, "
"not a dict."
)
else:
raise ValueError(
"Specified variogram model must be one of the "
"following: 'linear', 'power', 'gaussian', "
"'spherical', 'exponential', 'hole-effect', "
"'custom'."
)
elif type(variogram_model_parameters) is list:
if variogram_model in ["linear"]:
if len(variogram_model_parameters) != 2:
raise ValueError(
"Variogram model parameter list must have "
"exactly two entries when variogram model "
"set to 'linear'."
)
parameter_list = variogram_model_parameters
elif variogram_model in ["power"]:
if len(variogram_model_parameters) != 3:
raise ValueError(
"Variogram model parameter list must have "
"exactly three entries when variogram model "
"set to 'power'."
)
parameter_list = variogram_model_parameters
elif variogram_model in ["gaussian", "spherical", "exponential", "hole-effect"]:
if len(variogram_model_parameters) != 3:
raise ValueError(
"Variogram model parameter list must have "
"exactly three entries when variogram model "
"set to '%s'." % variogram_model
)
parameter_list = [
variogram_model_parameters[0] - variogram_model_parameters[2],
variogram_model_parameters[1],
variogram_model_parameters[2],
]
elif variogram_model in ["custom"]:
parameter_list = variogram_model_parameters
else:
raise ValueError(
"Specified variogram model must be one of the "
"following: 'linear', 'power', 'gaussian', "
"'spherical', 'exponential', 'hole-effect', "
"'custom'."
)
else:
raise TypeError(
"Variogram model parameters must be provided in either "
"a list or a dict when they are explicitly specified."
)
return parameter_list
def _initialize_variogram_model(
X,
y,
variogram_model,
variogram_model_parameters,
variogram_function,
nlags,
weight,
coordinates_type,
):
"""Initializes the variogram model for kriging. If user does not specify
parameters, calls automatic variogram estimation routine.
Returns lags, semivariance, and variogram model parameters.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of values to be kriged
variogram_model: str
user-specified variogram model to use
variogram_model_parameters: list
user-specified parameters for variogram model
variogram_function: callable
function that will be called to evaluate variogram model
(only used if user does not specify variogram model parameters)
nlags: int
integer scalar, number of bins into which to group inter-point distances
weight: bool
boolean flag that indicates whether the semivariances at smaller lags
should be weighted more heavily in the automatic variogram estimation
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
lags: ndarray
float array [nlags], distance values for bins into which the
semivariances were grouped
semivariance: ndarray
float array [nlags], averaged semivariance for each bin
variogram_model_parameters: list
parameters for the variogram model, either returned unaffected if the
user specified them or returned from the automatic variogram
estimation routine
"""
# distance calculation for rectangular coords now leverages
# scipy.spatial.distance's pdist function, which gives pairwise distances
# in a condensed distance vector (distance matrix flattened to a vector)
# to calculate semivariances...
if coordinates_type == "euclidean":
d = pdist(X, metric="euclidean")
g = 0.5 * pdist(y[:, None], metric="sqeuclidean")
# geographic coordinates only accepted if the problem is 2D
# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat
# old method of distance calculation is retained here...
# could be improved in the future
elif coordinates_type == "geographic":
if X.shape[1] != 2:
raise ValueError(
"Geographic coordinate type only supported for 2D datasets."
)
x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True)
y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True)
z1, z2 = np.meshgrid(y, y, sparse=True)
d = great_circle_distance(x1, y1, x2, y2)
g = 0.5 * (z1 - z2) ** 2.0
indices = np.indices(d.shape)
d = d[(indices[0, :, :] > indices[1, :, :])]
g = g[(indices[0, :, :] > indices[1, :, :])]
else:
raise ValueError(
"Specified coordinate type '%s' is not supported." % coordinates_type
)
# Equal-sized bins are now implemented. The upper limit on the bins
# is appended to the list (instead of calculated as part of the
# list comprehension) to avoid any numerical oddities
# (specifically, say, ending up as 0.99999999999999 instead of 1.0).
# Appending dmax + 0.001 ensures that the largest distance value
# is included in the semivariogram calculation.
dmax = np.amax(d)
dmin = np.amin(d)
dd = (dmax - dmin) / nlags
bins = [dmin + n * dd for n in range(nlags)]
dmax += 0.001
bins.append(dmax)
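    # Worked example of the binning above (illustrative numbers): with
    # dmin = 0, dmax = 10 and nlags = 5, dd = 2 and
    # bins = [0, 2, 4, 6, 8, 10.001], so the largest observed distance
    # falls inside the last bin.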
# This old binning method was experimental and doesn't seem
# to work too well. Bins were computed such that there are more
# at shorter lags. This effectively weights smaller distances more
# highly in determining the variogram. As Kitanidis points out,
# the variogram fit to the data at smaller lag distances is more
# important. However, the value at the largest lag probably ends up
# being biased too high for the larger values and thereby throws off
# automatic variogram calculation and confuses comparison of the
# semivariogram with the variogram model.
#
# dmax = np.amax(d)
# dmin = np.amin(d)
# dd = dmax - dmin
# bins = [dd*(0.5**n) + dmin for n in range(nlags, 1, -1)]
# bins.insert(0, dmin)
# bins.append(dmax)
lags = np.zeros(nlags)
semivariance = np.zeros(nlags)
for n in range(nlags):
# This 'if... else...' statement ensures that there are data
# in the bin so that numpy can actually find the mean. If we
# don't test this first, then Python kicks out an annoying warning
# message when there is an empty bin and we try to calculate the mean.
if d[(d >= bins[n]) & (d < bins[n + 1])].size > 0:
lags[n] = np.mean(d[(d >= bins[n]) & (d < bins[n + 1])])
semivariance[n] = np.mean(g[(d >= bins[n]) & (d < bins[n + 1])])
else:
lags[n] = np.nan
semivariance[n] = np.nan
lags = lags[~np.isnan(semivariance)]
semivariance = semivariance[~np.isnan(semivariance)]
    # a few tests to make sure that, if the variogram_model_parameters
# are supplied, they have been supplied as expected...
# if variogram_model_parameters was not defined, then estimate the variogram
if variogram_model_parameters is not None:
if variogram_model == "linear" and len(variogram_model_parameters) != 2:
raise ValueError(
"Exactly two parameters required for linear variogram model."
)
elif (
variogram_model
in ["power", "spherical", "exponential", "gaussian", "hole-effect"]
and len(variogram_model_parameters) != 3
):
raise ValueError(
"Exactly three parameters required for "
"%s variogram model" % variogram_model
)
else:
if variogram_model == "custom":
raise ValueError(
"Variogram parameters must be specified when "
"implementing custom variogram model."
)
else:
variogram_model_parameters = _calculate_variogram_model(
lags, semivariance, variogram_model, variogram_function, weight
)
return lags, semivariance, variogram_model_parameters
def _variogram_residuals(params, x, y, variogram_function, weight):
"""Function used in variogram model estimation. Returns residuals between
calculated variogram and actual data (lags/semivariance).
Called by _calculate_variogram_model.
Parameters
----------
params: list or 1D array
parameters for calculating the model variogram
x: ndarray
lags (distances) at which to evaluate the model variogram
y: ndarray
experimental semivariances at the specified lags
variogram_function: callable
        the actual function that evaluates the model variogram
weight: bool
flag for implementing the crude weighting routine, used in order to
fit smaller lags better
Returns
-------
resid: 1d array
residuals, dimension same as y
"""
# this crude weighting routine can be used to better fit the model
# variogram to the experimental variogram at smaller lags...
# the weights are calculated from a logistic function, so weights at small
# lags are ~1 and weights at the longest lags are ~0;
# the center of the logistic weighting is hard-coded to be at 70% of the
# distance from the shortest lag to the largest lag
if weight:
drange = np.amax(x) - np.amin(x)
k = 2.1972 / (0.1 * drange)
x0 = 0.7 * drange + np.amin(x)
weights = 1.0 / (1.0 + np.exp(-k * (x0 - x)))
weights /= np.sum(weights)
resid = (variogram_function(params, x) - y) * weights
else:
resid = variogram_function(params, x) - y
return resid
def _calculate_variogram_model(
lags, semivariance, variogram_model, variogram_function, weight
):
"""Function that fits a variogram model when parameters are not specified.
Returns variogram model parameters that minimize the RMSE between the
specified variogram function and the actual calculated variogram points.
Parameters
----------
lags: 1d array
binned lags/distances to use for variogram model parameter estimation
semivariance: 1d array
binned/averaged experimental semivariances to use for variogram model
parameter estimation
variogram_model: str/unicode
specified variogram model to use for parameter estimation
variogram_function: callable
        the actual function that evaluates the model variogram
weight: bool
        flag for implementing the crude weighting routine, used in order to
        fit smaller lags better; this is passed on to the residual
        calculation function, where weighting is actually applied...
Returns
-------
res: list
list of estimated variogram model parameters
NOTE that the estimation routine works in terms of the partial sill
(psill = sill - nugget) -- setting bounds such that psill > 0 ensures that
the sill will always be greater than the nugget...
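    Examples
    --------
    Illustrative sketch (synthetic values): fitting a linear model to a
    noise-free linear experimental variogram should recover roughly the
    generating slope and a near-zero nugget::

        import numpy as np
        from pykrige import variogram_models

        lags = np.array([1.0, 2.0, 3.0, 4.0])
        semivariance = 0.5 * lags
        params = _calculate_variogram_model(
            lags, semivariance, "linear",
            variogram_models.linear_variogram_model, False
        )
        # params is approximately [0.5, 0.0]  (slope, nugget)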
"""
if variogram_model == "linear":
x0 = [
(np.amax(semivariance) - np.amin(semivariance))
/ (np.amax(lags) - np.amin(lags)),
np.amin(semivariance),
]
bnds = ([0.0, 0.0], [np.inf, np.amax(semivariance)])
elif variogram_model == "power":
x0 = [
(np.amax(semivariance) - np.amin(semivariance))
/ (np.amax(lags) - np.amin(lags)),
1.1,
np.amin(semivariance),
]
bnds = ([0.0, 0.001, 0.0], [np.inf, 1.999, np.amax(semivariance)])
else:
x0 = [
np.amax(semivariance) - np.amin(semivariance),
0.25 * np.amax(lags),
np.amin(semivariance),
]
bnds = (
[0.0, 0.0, 0.0],
[10.0 * np.amax(semivariance), np.amax(lags), np.amax(semivariance)],
)
# use 'soft' L1-norm minimization in order to buffer against
# potential outliers (weird/skewed points)
res = least_squares(
_variogram_residuals,
x0,
bounds=bnds,
loss="soft_l1",
args=(lags, semivariance, variogram_function, weight),
)
return res.x
def _krige(
X,
y,
coords,
variogram_function,
variogram_model_parameters,
coordinates_type,
pseudo_inv=False,
):
"""Sets up and solves the ordinary kriging system for the given
coordinate pair. This function is only used for the statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
coords: ndarray
float array [1, n_dim], point at which to evaluate the kriging system
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
pseudo_inv : :class:`bool`, optional
Whether the kriging system is solved with the pseudo inverted
kriging matrix. If `True`, this leads to more numerical stability
and redundant points are averaged. But it can take more time.
Default: False
Returns
-------
zinterp: float
kriging estimate at the specified point
sigmasq: float
mean square error of the kriging estimate
"""
zero_index = None
zero_value = False
# calculate distance between points... need a square distance matrix
# of inter-measurement-point distances and a vector of distances between
# measurement points (X) and the kriging point (coords)
if coordinates_type == "euclidean":
d = squareform(pdist(X, metric="euclidean"))
bd = np.squeeze(cdist(X, coords[None, :], metric="euclidean"))
# geographic coordinate distances still calculated in the old way...
# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat
# also assume problem is 2D; check done earlier in initializing variogram
elif coordinates_type == "geographic":
x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True)
y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True)
d = great_circle_distance(x1, y1, x2, y2)
bd = great_circle_distance(
X[:, 0],
X[:, 1],
coords[0] * np.ones(X.shape[0]),
coords[1] * np.ones(X.shape[0]),
)
# this check is done when initializing variogram, but kept here anyways...
else:
raise ValueError(
"Specified coordinate type '%s' is not supported." % coordinates_type
)
# check if kriging point overlaps with measurement point
if np.any(np.absolute(bd) <= 1e-10):
zero_value = True
zero_index = np.where(bd <= 1e-10)[0][0]
# set up kriging matrix
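    # The ordinary kriging system solved below has the block structure
    #   [ -gamma(d_ij)  1 ] [ w  ]   [ -gamma(d_i*) ]
    #   [       1       0 ] [ mu ] = [       1      ]
    # where w are the kriging weights, mu is the Lagrange multiplier for the
    # unbiasedness constraint sum(w) = 1, and d_i* are the distances from the
    # data points to the kriging point.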
n = X.shape[0]
a = np.zeros((n + 1, n + 1))
a[:n, :n] = -variogram_function(variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
# set up RHS
b = np.zeros((n + 1, 1))
b[:n, 0] = -variogram_function(variogram_model_parameters, bd)
if zero_value:
b[zero_index, 0] = 0.0
b[n, 0] = 1.0
# solve
if pseudo_inv:
res = np.linalg.lstsq(a, b, rcond=None)[0]
else:
res = np.linalg.solve(a, b)
zinterp = np.sum(res[:n, 0] * y)
sigmasq = np.sum(res[:, 0] * -b[:, 0])
return zinterp, sigmasq
def _find_statistics(
X,
y,
variogram_function,
variogram_model_parameters,
coordinates_type,
pseudo_inv=False,
):
"""Calculates variogram fit statistics.
Returns the delta, sigma, and epsilon values for the variogram fit.
These arrays are used for statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
pseudo_inv : :class:`bool`, optional
Whether the kriging system is solved with the pseudo inverted
kriging matrix. If `True`, this leads to more numerical stability
and redundant points are averaged. But it can take more time.
Default: False
Returns
-------
delta: ndarray
residuals between observed values and kriged estimates for those values
sigma: ndarray
mean error in kriging estimates
epsilon: ndarray
residuals normalized by their mean error
"""
delta = np.zeros(y.shape)
sigma = np.zeros(y.shape)
for i in range(y.shape[0]):
# skip the first value in the kriging problem
if i == 0:
continue
else:
k, ss = _krige(
X[:i, :],
y[:i],
X[i, :],
variogram_function,
variogram_model_parameters,
coordinates_type,
pseudo_inv,
)
# if the estimation error is zero, it's probably because
# the evaluation point X[i, :] is really close to one of the
# kriging system points in X[:i, :]...
# in the case of zero estimation error, the results are not stored
if np.absolute(ss) < eps:
continue
delta[i] = y[i] - k
sigma[i] = np.sqrt(ss)
# only use non-zero entries in these arrays... sigma is used to pull out
# non-zero entries in both cases because it is guaranteed to be positive,
# whereas delta can be either positive or negative
delta = delta[sigma > eps]
sigma = sigma[sigma > eps]
epsilon = delta / sigma
return delta, sigma, epsilon
def calcQ1(epsilon):
"""Returns the Q1 statistic for the variogram fit (see [1])."""
return abs(np.sum(epsilon) / (epsilon.shape[0] - 1))
def calcQ2(epsilon):
"""Returns the Q2 statistic for the variogram fit (see [1])."""
return np.sum(epsilon**2) / (epsilon.shape[0] - 1)
def calc_cR(Q2, sigma):
"""Returns the cR statistic for the variogram fit (see [1])."""
return Q2 * np.exp(np.sum(np.log(sigma**2)) / sigma.shape[0])
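# Illustrative smoke test of the fit statistics on synthetic residuals
# (guarded so it never runs on import): with standardized residuals drawn
# from a standard normal distribution, Q1 should be near 0 and Q2 near 1.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _epsilon_demo = _rng.standard_normal(500)
    _sigma_demo = np.ones(500)
    print("Q1 =", calcQ1(_epsilon_demo))
    print("Q2 =", calcQ2(_epsilon_demo))
    print("cR =", calc_cR(calcQ2(_epsilon_demo), _sigma_demo))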
| 30,289 | 33.538198 | 88 | py |
PyKrige | PyKrige-main/src/pykrige/uk3d.py | # coding: utf-8
"""
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Contains class UniversalKriging3D.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
Copyright (c) 2015-2020, PyKrige Developers
"""
import warnings
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
from . import core, variogram_models
from .compat_gstools import validate_gstools
from .core import (
P_INV,
_adjust_for_anisotropy,
_find_statistics,
_initialize_variogram_model,
_make_variogram_parameter_list,
)
class UniversalKriging3D:
"""Three-dimensional universal kriging.
Parameters
----------
x : array_like
X-coordinates of data points.
y : array_like
Y-coordinates of data points.
z : array_like
Z-coordinates of data points.
val : array_like
Values at data points.
variogram_model : str or GSTools CovModel, optional
Specified which variogram model to use; may be one of the following:
linear, power, gaussian, spherical, exponential, hole-effect.
Default is linear variogram model. To utilize a custom variogram model,
specify 'custom'; you must also provide variogram_parameters and
variogram_function. Note that the hole-effect model is only
technically correct for one-dimensional problems.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
Parameters that define the specified variogram model. If not provided,
parameters will be automatically calculated using a "soft" L1 norm
minimization scheme. For variogram model parameters provided in a dict,
the required dict keys vary according to the specified variogram
model: ::
# linear
{'slope': slope, 'nugget': nugget}
# power
{'scale': scale, 'exponent': exponent, 'nugget': nugget}
# gaussian, spherical, exponential and hole-effect:
{'sill': s, 'range': r, 'nugget': n}
# OR
{'psill': p, 'range': r, 'nugget': n}
Note that either the full sill or the partial sill
(psill = sill - nugget) can be specified in the dict.
For variogram model parameters provided in a list, the entries
must be as follows: ::
# linear
[slope, nugget]
# power
[scale, exponent, nugget]
# gaussian, spherical, exponential and hole-effect:
[sill, range, nugget]
Note that the full sill (NOT the partial sill) must be specified
in the list format.
For a custom variogram model, the parameters are required, as custom
variogram models will not automatically be fit to the data.
Furthermore, the parameters must be specified in list format, in the
order in which they are used in the callable function (see
variogram_function for more information). The code does not check
that the provided list contains the appropriate number of parameters
for the custom variogram model, so an incorrect parameter list in
such a case will probably trigger an esoteric exception someplace
deep in the code.
NOTE that, while the list format expects the full sill, the code
itself works internally with the partial sill.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. The function must take only two arguments:
first, a list of parameters for the variogram model;
second, the distances at which to calculate the variogram model.
The list provided in variogram_parameters will be passed to the
function as the first argument.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : bool, optional
Flag that specifies if semivariance at smaller lags should be weighted
more heavily when automatically calculating variogram model.
The routine is currently hard-coded such that the weights are
calculated from a logistic function, so weights at small lags are ~1
and weights at the longest lags are ~0; the center of the logistic
weighting is hard-coded to be at 70% of the distance from the shortest
lag to the largest lag. Setting this parameter to True indicates that
weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more
important in fitting a variogram model, so the option is provided
to enable such weighting.)
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy in
the y direction. Default is 1 (effectively no stretching).
Scaling is applied in the y direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy in
the z direction. Default is 1 (effectively no stretching).
Scaling is applied in the z direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_angle_x : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_y : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_z : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
drift_terms : list of strings, optional
List of drift terms to include in three-dimensional universal kriging.
Supported drift terms are currently 'regional_linear', 'specified',
and 'functional'.
specified_drift : list of array-like objects, optional
List of arrays that contain the drift values at data points.
The arrays must be shape (N,) or (N, 1), where N is the number of
data points. Any number of specified-drift terms may be used.
functional_drift : list of callable objects, optional
List of callable functions that will be used to evaluate drift terms.
The function must be a function of only the three spatial coordinates
and must return a single value for each coordinate triplet.
It must be set up to be called with only three arguments,
first an array of x values, the second an array of y values,
and the third an array of z values. If the problem involves anisotropy,
the drift values are calculated in the adjusted data frame.
verbose : boolean, optional
Enables program text output to monitor kriging process.
Default is False (off).
enable_plotting : boolean, optional
Enables plotting to display variogram. Default is False (off).
exact_values : bool, optional
If True, interpolation provides input values at input locations.
If False, interpolation accounts for variance/nugget within input
values at input locations and does not behave as an
exact-interpolator [2]. Note that this only has an effect if
there is variance/nugget present within the input data since it is
interpreted as measurement error. If the nugget is zero, the kriged
field will behave as an exact interpolator.
pseudo_inv : :class:`bool`, optional
Whether the kriging system is solved with the pseudo inverted
kriging matrix. If `True`, this leads to more numerical stability
and redundant points are averaged. But it can take more time.
Default: False
pseudo_inv_type : :class:`str`, optional
Here you can select the algorithm to compute the pseudo-inverse matrix:
* `"pinv"`: use `pinv` from `scipy` which uses `lstsq`
* `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values
Default: `"pinv"`
References
----------
    .. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
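    Examples
    --------
    A minimal, illustrative sketch; the data values and grids below are
    arbitrary and are not reference results::

        import numpy as np
        from pykrige.uk3d import UniversalKriging3D

        data = np.array([
            [0.1, 0.1, 0.3, 0.9],
            [0.2, 0.1, 0.4, 0.8],
            [0.1, 0.3, 0.1, 0.9],
            [0.5, 0.4, 0.4, 0.5],
            [0.3, 0.3, 0.2, 0.7],
        ])
        gridx = np.arange(0.0, 0.6, 0.1)
        gridy = np.arange(0.0, 0.6, 0.1)
        gridz = np.arange(0.0, 0.6, 0.1)
        uk3d = UniversalKriging3D(
            data[:, 0], data[:, 1], data[:, 2], data[:, 3],
            variogram_model="linear", drift_terms=["regional_linear"],
        )
        k3d, ss3d = uk3d.execute("grid", gridx, gridy, gridz)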
"""
UNBIAS = True # This can be changed to remove the unbiasedness condition
# Really for testing purposes only...
eps = 1.0e-10 # Cutoff for comparison to zero
variogram_dict = {
"linear": variogram_models.linear_variogram_model,
"power": variogram_models.power_variogram_model,
"gaussian": variogram_models.gaussian_variogram_model,
"spherical": variogram_models.spherical_variogram_model,
"exponential": variogram_models.exponential_variogram_model,
"hole-effect": variogram_models.hole_effect_variogram_model,
}
def __init__(
self,
x,
y,
z,
val,
variogram_model="linear",
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling_y=1.0,
anisotropy_scaling_z=1.0,
anisotropy_angle_x=0.0,
anisotropy_angle_y=0.0,
anisotropy_angle_z=0.0,
drift_terms=None,
specified_drift=None,
functional_drift=None,
verbose=False,
enable_plotting=False,
exact_values=True,
pseudo_inv=False,
pseudo_inv_type="pinv",
):
# config the pseudo inverse
self.pseudo_inv = bool(pseudo_inv)
self.pseudo_inv_type = str(pseudo_inv_type)
if self.pseudo_inv_type not in P_INV:
raise ValueError("pseudo inv type not valid: " + str(pseudo_inv_type))
# Deal with mutable default argument
if drift_terms is None:
drift_terms = []
if specified_drift is None:
specified_drift = []
if functional_drift is None:
functional_drift = []
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
if not isinstance(exact_values, bool):
raise ValueError("exact_values has to be boolean True or False")
self.exact_values = exact_values
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
validate_gstools(self.model)
if self.model.field_dim < 3:
raise ValueError("GSTools: model dim is not 3")
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling_y = self.model.pykrige_anis_y
anisotropy_scaling_z = self.model.pykrige_anis_z
anisotropy_angle_x = self.model.pykrige_angle_x
anisotropy_angle_y = self.model.pykrige_angle_y
anisotropy_angle_z = self.model.pykrige_angle_z
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = np.atleast_1d(
np.squeeze(np.array(x, copy=True, dtype=np.float64))
)
self.Y_ORIG = np.atleast_1d(
np.squeeze(np.array(y, copy=True, dtype=np.float64))
)
self.Z_ORIG = np.atleast_1d(
np.squeeze(np.array(z, copy=True, dtype=np.float64))
)
self.VALUES = np.atleast_1d(
np.squeeze(np.array(val, copy=True, dtype=np.float64))
)
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print("Plotting Enabled\n")
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG)) / 2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG)) / 2.0
self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG)) / 2.0
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
if self.verbose:
print("Adjusting data for anisotropy...")
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z],
).T
if self.verbose:
print("Initializing variogram model...")
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
"euclidean",
)
if self.verbose:
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_function,
self.variogram_model_parameters,
"euclidean",
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
if self.verbose:
print("Initializing drift terms...")
# Note that the regional linear drift values will be based on the
# adjusted coordinate system. Really, it doesn't actually matter
# which coordinate system is used here.
if "regional_linear" in drift_terms:
self.regional_linear_drift = True
if self.verbose:
print("Implementing regional linear drift.")
else:
self.regional_linear_drift = False
if "specified" in drift_terms:
if type(specified_drift) is not list:
raise TypeError(
"Arrays for specified drift terms must be "
"encapsulated in a list."
)
if len(specified_drift) == 0:
raise ValueError(
"Must provide at least one drift-value array "
"when using the 'specified' drift capability."
)
self.specified_drift = True
self.specified_drift_data_arrays = []
for term in specified_drift:
specified = np.squeeze(np.array(term, copy=True))
if specified.size != self.X_ORIG.size:
raise ValueError(
"Must specify the drift values for each "
"data point when using the "
"'specified' drift capability."
)
self.specified_drift_data_arrays.append(specified)
else:
self.specified_drift = False
# The provided callable functions will be evaluated using
# the adjusted coordinates.
if "functional" in drift_terms:
if type(functional_drift) is not list:
raise TypeError(
"Callables for functional drift terms must "
"be encapsulated in a list."
)
if len(functional_drift) == 0:
raise ValueError(
"Must provide at least one callable object "
"when using the 'functional' drift capability."
)
self.functional_drift = True
self.functional_drift_terms = functional_drift
else:
self.functional_drift = False
def update_variogram_model(
self,
variogram_model,
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling_y=1.0,
anisotropy_scaling_z=1.0,
anisotropy_angle_x=0.0,
anisotropy_angle_y=0.0,
anisotropy_angle_z=0.0,
):
"""Changes the variogram model and variogram parameters
for the kriging system.
Parameters
----------
variogram_model : str or GSTools CovModel
May be any of the variogram models listed above.
May also be 'custom', in which case variogram_parameters and
variogram_function must be specified.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
List or dict of variogram model parameters, as explained above.
If not provided, a best fit model will be calculated as
described above.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. See above for more information.
        nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : boolean, optional
Flag that specifies if semivariance at smaller lags should be
weighted more heavily when automatically calculating variogram
model. See above for more information. True indicates that
weights will be applied. Default is False.
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy
in y-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy
in z-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_angle_x : float, optional
Angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_y : float, optional
Angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_z : float, optional
Angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation).
See above for more information.
"""
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
validate_gstools(self.model)
if self.model.field_dim < 3:
raise ValueError("GSTools: model dim is not 3")
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling_y = self.model.pykrige_anis_y
anisotropy_scaling_z = self.model.pykrige_anis_z
anisotropy_angle_x = self.model.pykrige_angle_x
anisotropy_angle_y = self.model.pykrige_angle_y
anisotropy_angle_z = self.model.pykrige_angle_z
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if (
anisotropy_scaling_y != self.anisotropy_scaling_y
or anisotropy_scaling_z != self.anisotropy_scaling_z
or anisotropy_angle_x != self.anisotropy_angle_x
or anisotropy_angle_y != self.anisotropy_angle_y
or anisotropy_angle_z != self.anisotropy_angle_z
):
if self.verbose:
print("Adjusting data for anisotropy...")
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[
self.anisotropy_angle_x,
self.anisotropy_angle_y,
self.anisotropy_angle_z,
],
).T
if self.verbose:
print("Updating variogram mode...")
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
"euclidean",
)
if self.verbose:
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_function,
self.variogram_model_parameters,
"euclidean",
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
def display_variogram_model(self):
"""Displays semivariogram and variogram model."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, "r*")
ax.plot(
self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags),
"k-",
)
plt.show()
def switch_verbose(self):
"""Enables/disables program text output. No arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Enables/disable variogram plot display. No arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit. No arguments."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit. No arguments."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c="k", marker="*")
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
"""Returns the Q1, Q2, and cR statistics for the
variogram fit (in that order). No arguments.
"""
return self.Q1, self.Q2, self.cR
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
def _get_kriging_matrix(self, n, n_withdrifts):
"""Assembles the kriging matrix."""
xyz = np.concatenate(
(
self.X_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.Z_ADJUSTED[:, np.newaxis],
),
axis=1,
)
d = cdist(xyz, xyz, "euclidean")
if self.UNBIAS:
a = np.zeros((n_withdrifts + 1, n_withdrifts + 1))
else:
a = np.zeros((n_withdrifts, n_withdrifts))
a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
i = n
if self.regional_linear_drift:
a[:n, i] = self.X_ADJUSTED
a[i, :n] = self.X_ADJUSTED
i += 1
a[:n, i] = self.Y_ADJUSTED
a[i, :n] = self.Y_ADJUSTED
i += 1
a[:n, i] = self.Z_ADJUSTED
a[i, :n] = self.Z_ADJUSTED
i += 1
if self.specified_drift:
for arr in self.specified_drift_data_arrays:
a[:n, i] = arr
a[i, :n] = arr
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
a[:n, i] = func(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)
a[i, :n] = func(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)
i += 1
if i != n_withdrifts:
warnings.warn(
"Error in creating kriging matrix. Kriging may fail.", RuntimeWarning
)
if self.UNBIAS:
a[n_withdrifts, :n] = 1.0
a[:n, n_withdrifts] = 1.0
a[n : n_withdrifts + 1, n : n_withdrifts + 1] = 0.0
return a
def _exec_vector(self, a, bd, xyz, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
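        # Rough memory estimate: the right-hand-side array b below has shape
        # (npt, n_withdrifts + 1, 1) in float64, i.e. about
        # npt * (n_withdrifts + 1) * 8 bytes, and the solution array x is of
        # comparable size -- for a 100x100x100 grid this can reach gigabytes,
        # which is why the 'loop' backend exists as a lower-memory alternative.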
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
if self.UNBIAS:
b = np.zeros((npt, n_withdrifts + 1, 1))
else:
b = np.zeros((npt, n_withdrifts, 1))
b[:, :n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], zero_index[1], 0] = 0.0
i = n
if self.regional_linear_drift:
b[:, i, 0] = xyz[:, 2]
i += 1
b[:, i, 0] = xyz[:, 1]
i += 1
b[:, i, 0] = xyz[:, 0]
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
b[:, i, 0] = spec_vals.flatten()
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[:, i, 0] = func(xyz[:, 2], xyz[:, 1], xyz[:, 0])
i += 1
if i != n_withdrifts:
warnings.warn(
"Error in setting up kriging system. Kriging may fail.",
RuntimeWarning,
)
if self.UNBIAS:
b[:, n_withdrifts, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(
mask[:, np.newaxis, np.newaxis], n_withdrifts + 1, axis=1
)
b = np.ma.array(b, mask=mask_b)
if self.UNBIAS:
x = (
np.dot(a_inv, b.reshape((npt, n_withdrifts + 1)).T)
.reshape((1, n_withdrifts + 1, npt))
.T
)
else:
x = (
np.dot(a_inv, b.reshape((npt, n_withdrifts)).T)
.reshape((1, n_withdrifts, npt))
.T
)
kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return kvalues, sigmasq
def _exec_loop(self, a, bd_all, xyz, mask, n_withdrifts, spec_drift_grids):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
        # Note that this is the same thing as range(npt) if mask is not defined;
        # otherwise it takes the non-masked elements.
        for j in np.nonzero(~mask)[0]:
            bd = bd_all[j]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
if self.UNBIAS:
b = np.zeros((n_withdrifts + 1, 1))
else:
b = np.zeros((n_withdrifts, 1))
b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], 0] = 0.0
i = n
if self.regional_linear_drift:
b[i, 0] = xyz[j, 2]
i += 1
b[i, 0] = xyz[j, 1]
i += 1
b[i, 0] = xyz[j, 0]
i += 1
if self.specified_drift:
for spec_vals in spec_drift_grids:
                    b[i, 0] = spec_vals.flatten()[j]  # drift value at point j
i += 1
if self.functional_drift:
for func in self.functional_drift_terms:
b[i, 0] = func(xyz[j, 2], xyz[j, 1], xyz[j, 0])
i += 1
if i != n_withdrifts:
warnings.warn(
"Error in setting up kriging system. Kriging may fail.",
RuntimeWarning,
)
if self.UNBIAS:
b[n_withdrifts, 0] = 1.0
x = np.dot(a_inv, b)
kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return kvalues, sigmasq
def execute(
self,
style,
xpoints,
ypoints,
zpoints,
mask=None,
backend="vectorized",
specified_drift_arrays=None,
):
"""Calculates a kriged grid and the associated variance.
        This is the method that performs the main kriging calculation.
        By default, measurements (i.e., the data values) are considered
        'exact'. This means that, when a specified coordinate for
        interpolation is exactly the same as one of the data points, the
        variogram evaluated at the point is forced to be zero, and the
        diagonal of the kriging matrix is likewise forced to be zero. In
        forcing the variogram evaluated at data points to be zero, we are
        effectively saying that there is no variance at that point
        (no uncertainty, so the value is 'exact').
        The 'exact_values' boolean flag supplied at instantiation controls
        this behavior. Setting the flag to False indicates that the
        variogram should not be forced to be zero at zero distance (i.e.,
        when evaluated at data points); instead, the uncertainty at such a
        point is governed by the nugget, since the variogram value at zero
        separation is no longer overridden.
Parameters
----------
style : str
Specifies how to treat input kriging points. Specifying 'grid'
treats xpoints, ypoints, and zpoints as arrays of x, y, and z
coordinates that define a rectangular grid. Specifying 'points'
treats xpoints, ypoints, and zpoints as arrays that provide
coordinates at which to solve the kriging system. Specifying
'masked' treats xpoints, ypoints, and zpoints as arrays of x, y,
and z coordinates that define a rectangular grid and uses mask
to only evaluate specific points in the grid.
xpoints : array_like, shape (N,) or (N, 1)
            If style is specified as 'grid' or 'masked', x-coordinates of
LxMxN grid. If style is specified as 'points', x-coordinates of
specific points at which to solve kriging system.
ypoints : array_like, shape (M,) or (M, 1)
If style is specified as 'grid' or 'masked', y-coordinates of
LxMxN grid. If style is specified as 'points', y-coordinates of
specific points at which to solve kriging system. Note that in this
case, xpoints, ypoints, and zpoints must have the same dimensions
(i.e., L = M = N).
zpoints : array_like, shape (L,) or (L, 1)
If style is specified as 'grid' or 'masked', z-coordinates of
LxMxN grid. If style is specified as 'points', z-coordinates of
specific points at which to solve kriging system. Note that in this
case, xpoints, ypoints, and zpoints must have the same dimensions
(i.e., L = M = N).
mask : boolean array, shape (L, M, N), optional
Specifies the points in the rectangular grid defined by xpoints,
ypoints, zpoints that are to be excluded in the kriging
calculations. Must be provided if style is specified as 'masked'.
False indicates that the point should not be masked, so the kriging
system will be solved at the point.
True indicates that the point should be masked, so the kriging
system will not be solved at the point.
backend : string, optional
Specifies which approach to use in kriging. Specifying 'vectorized'
will solve the entire kriging problem at once in a vectorized
operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging
system is to be solved. This approach is slower but also less
memory-intensive. Default is 'vectorized'.
specified_drift_arrays : list of array-like objects, optional
Specifies the drift values at the points at which the kriging
system is to be evaluated. Required if 'specified' drift provided
in the list of drift terms when instantiating the UniversalKriging3D
class. Must be a list of arrays in the same order as the list
provided when instantiating the kriging object. Array(s) must be
the same dimension as the specified grid or have the same number
of points as the specified points; i.e., the arrays either must be
shape (L, M, N), where L is the number of z grid-points,
M is the number of y grid-points, and N is the number of
x grid-points, or shape (N,) or (N, 1), where N is the number of
points at which to evaluate the kriging system.
Returns
-------
kvalues : ndarray, shape (L, M, N) or (N,) or (N, 1)
Interpolated values of specified grid or at the specified set
of points. If style was specified as 'masked', kvalues will be a
numpy masked array.
sigmasq : ndarray, shape (L, M, N) or (N,) or (N, 1)
Variance at specified grid points or at the specified set of points.
If style was specified as 'masked', sigmasq will be a numpy
masked array.
"""
if self.verbose:
print("Executing Ordinary Kriging...\n")
if style != "grid" and style != "masked" and style != "points":
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
n = self.X_ADJUSTED.shape[0]
n_withdrifts = n
if self.regional_linear_drift:
n_withdrifts += 3
if self.specified_drift:
n_withdrifts += len(self.specified_drift_data_arrays)
if self.functional_drift:
n_withdrifts += len(self.functional_drift_terms)
nx = xpts.size
ny = ypts.size
nz = zpts.size
a = self._get_kriging_matrix(n, n_withdrifts)
if style in ["grid", "masked"]:
if style == "masked":
if mask is None:
raise IOError(
"Must specify boolean masking array when style is 'masked'."
)
if mask.ndim != 3:
raise ValueError("Mask is not three-dimensional.")
if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
if (
mask.shape[0] == nx
and mask.shape[2] == nz
and mask.shape[1] == ny
):
mask = mask.swapaxes(0, 2)
else:
raise ValueError(
"Mask dimensions do not match specified grid dimensions."
)
mask = mask.flatten()
npt = nz * ny * nx
grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing="ij")
xpts = grid_x.flatten()
ypts = grid_y.flatten()
zpts = grid_z.flatten()
elif style == "points":
            if xpts.size != ypts.size or ypts.size != zpts.size:
                raise ValueError(
                    "xpoints, ypoints, and zpoints must have the same "
                    "dimensions when treated as listing discrete points."
                )
npt = nx
else:
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
if specified_drift_arrays is None:
specified_drift_arrays = []
spec_drift_grids = []
if self.specified_drift:
if len(specified_drift_arrays) == 0:
raise ValueError(
"Must provide drift values for kriging "
"points when using 'specified' drift "
"capability."
)
if type(specified_drift_arrays) is not list:
raise TypeError(
"Arrays for specified drift terms must "
"be encapsulated in a list."
)
for spec in specified_drift_arrays:
if style in ["grid", "masked"]:
if spec.ndim < 3:
raise ValueError(
"Dimensions of drift values array do "
"not match specified grid dimensions."
)
elif (
spec.shape[0] != nz
or spec.shape[1] != ny
or spec.shape[2] != nx
):
if (
spec.shape[0] == nx
and spec.shape[2] == nz
and spec.shape[1] == ny
):
spec_drift_grids.append(np.squeeze(spec.swapaxes(0, 2)))
else:
raise ValueError(
"Dimensions of drift values array "
"do not match specified grid "
"dimensions."
)
else:
spec_drift_grids.append(np.squeeze(spec))
elif style == "points":
if spec.ndim != 1:
raise ValueError(
"Dimensions of drift values array do "
"not match specified grid dimensions."
)
elif spec.shape[0] != xpts.size:
raise ValueError(
"Number of supplied drift values in "
"array do not match specified number "
"of kriging points."
)
else:
spec_drift_grids.append(np.squeeze(spec))
if len(spec_drift_grids) != len(self.specified_drift_data_arrays):
raise ValueError(
"Inconsistent number of specified drift terms supplied."
)
else:
if len(specified_drift_arrays) != 0:
warnings.warn(
"Provided specified drift values, but "
"'specified' drift was not initialized during "
"instantiation of UniversalKriging3D class.",
RuntimeWarning,
)
xpts, ypts, zpts = _adjust_for_anisotropy(
np.vstack((xpts, ypts, zpts)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z],
).T
if style != "masked":
mask = np.zeros(npt, dtype="bool")
xyz_points = np.concatenate(
(zpts[:, np.newaxis], ypts[:, np.newaxis], xpts[:, np.newaxis]), axis=1
)
xyz_data = np.concatenate(
(
self.Z_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.X_ADJUSTED[:, np.newaxis],
),
axis=1,
)
bd = cdist(xyz_points, xyz_data, "euclidean")
if backend == "vectorized":
kvalues, sigmasq = self._exec_vector(
a, bd, xyz_points, mask, n_withdrifts, spec_drift_grids
)
elif backend == "loop":
kvalues, sigmasq = self._exec_loop(
a, bd, xyz_points, mask, n_withdrifts, spec_drift_grids
)
else:
raise ValueError(
"Specified backend {} is not supported for "
"3D ordinary kriging.".format(backend)
)
if style == "masked":
kvalues = np.ma.array(kvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ["masked", "grid"]:
kvalues = kvalues.reshape((nz, ny, nx))
sigmasq = sigmasq.reshape((nz, ny, nx))
return kvalues, sigmasq
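# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative values only): 3D universal kriging with a
# regional linear drift on synthetic data, assuming numpy is available.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    x, y, z = rng.random((3, 30)) * 10.0  # 30 scattered sample locations
    val = 0.5 * x + 0.1 * y + rng.normal(scale=0.2, size=30)  # synthetic values
    uk3d = UniversalKriging3D(
        x, y, z, val, variogram_model="linear", drift_terms=["regional_linear"]
    )
    gridx = gridy = gridz = np.linspace(0.0, 10.0, 5)
    kvals, sigmasq = uk3d.execute("grid", gridx, gridy, gridz)
    print(kvals.shape, sigmasq.shape)  # (5, 5, 5) (5, 5, 5)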
| 49,151 | 41.852659 | 88 | py |
PyKrige | PyKrige-main/src/pykrige/ok3d.py | # coding: utf-8
"""
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Contains class OrdinaryKriging3D.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
Copyright (c) 2015-2020, PyKrige Developers
"""
import warnings
import numpy as np
import scipy.linalg
from scipy.spatial.distance import cdist
from . import core, variogram_models
from .compat_gstools import validate_gstools
from .core import (
P_INV,
_adjust_for_anisotropy,
_find_statistics,
_initialize_variogram_model,
_make_variogram_parameter_list,
)
class OrdinaryKriging3D:
"""Three-dimensional ordinary kriging.
Parameters
----------
x : array_like
X-coordinates of data points.
y : array_like
Y-coordinates of data points.
z : array_like
Z-coordinates of data points.
val : array_like
Values at data points.
variogram_model : str or GSTools CovModel, optional
        Specifies which variogram model to use; may be one of the following:
linear, power, gaussian, spherical, exponential, hole-effect.
Default is linear variogram model. To utilize a custom variogram model,
specify 'custom'; you must also provide variogram_parameters and
variogram_function. Note that the hole-effect model is only technically
correct for one-dimensional problems.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
Parameters that define the specified variogram model. If not provided,
parameters will be automatically calculated using a "soft" L1 norm
minimization scheme. For variogram model parameters provided in a dict,
the required dict keys vary according to the specified variogram
model: ::
# linear
{'slope': slope, 'nugget': nugget}
# power
{'scale': scale, 'exponent': exponent, 'nugget': nugget}
# gaussian, spherical, exponential and hole-effect:
{'sill': s, 'range': r, 'nugget': n}
# OR
{'psill': p, 'range': r, 'nugget': n}
Note that either the full sill or the partial sill
(psill = sill - nugget) can be specified in the dict.
For variogram model parameters provided in a list, the entries
must be as follows: ::
# linear
[slope, nugget]
# power
[scale, exponent, nugget]
# gaussian, spherical, exponential and hole-effect:
[sill, range, nugget]
Note that the full sill (NOT the partial sill) must be specified
in the list format.
For a custom variogram model, the parameters are required, as custom
variogram models will not automatically be fit to the data.
Furthermore, the parameters must be specified in list format, in the
order in which they are used in the callable function (see
variogram_function for more information). The code does not check
that the provided list contains the appropriate number of parameters
for the custom variogram model, so an incorrect parameter list in
such a case will probably trigger an esoteric exception someplace
deep in the code.
NOTE that, while the list format expects the full sill, the code
itself works internally with the partial sill.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. The function must take only two arguments:
first, a list of parameters for the variogram model;
second, the distances at which to calculate the variogram model.
The list provided in variogram_parameters will be passed to the
function as the first argument.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : boolean, optional
Flag that specifies if semivariance at smaller lags should be weighted
more heavily when automatically calculating variogram model.
The routine is currently hard-coded such that the weights are
calculated from a logistic function, so weights at small lags are ~1
and weights at the longest lags are ~0; the center of the logistic
weighting is hard-coded to be at 70% of the distance from the shortest
lag to the largest lag. Setting this parameter to True indicates that
weights will be applied. Default is False.
(Kitanidis suggests that the values at smaller lags are more
important in fitting a variogram model, so the option is provided
to enable such weighting.)
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy
in the y direction. Default is 1 (effectively no stretching).
Scaling is applied in the y direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy
in the z direction. Default is 1 (effectively no stretching).
Scaling is applied in the z direction in the rotated data frame
(i.e., after adjusting for the anisotropy_angle_x/y/z,
if anisotropy_angle_x/y/z is/are not 0).
anisotropy_angle_x : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_y : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
anisotropy_angle_z : float, optional
CCW angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation). Note that the coordinate system is rotated.
X rotation is applied first, then y rotation, then z rotation.
Scaling is applied after rotation.
verbose : bool, optional
Enables program text output to monitor kriging process.
Default is False (off).
enable_plotting : bool, optional
Enables plotting to display variogram. Default is False (off).
exact_values : bool, optional
If True, interpolation provides input values at input locations.
If False, interpolation accounts for variance/nugget within input
values at input locations and does not behave as an
exact-interpolator [2]. Note that this only has an effect if
there is variance/nugget present within the input data since it is
interpreted as measurement error. If the nugget is zero, the kriged
field will behave as an exact interpolator.
pseudo_inv : :class:`bool`, optional
Whether the kriging system is solved with the pseudo inverted
kriging matrix. If `True`, this leads to more numerical stability
and redundant points are averaged. But it can take more time.
Default: False
pseudo_inv_type : :class:`str`, optional
Here you can select the algorithm to compute the pseudo-inverse matrix:
* `"pinv"`: use `pinv` from `scipy` which uses `lstsq`
* `"pinvh"`: use `pinvh` from `scipy` which uses eigen-values
Default: `"pinv"`
References
----------
    .. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
.. [2] N. Cressie, Statistics for spatial data,
(Wiley Series in Probability and Statistics, 1993) 137 p.
"""
eps = 1.0e-10 # Cutoff for comparison to zero
variogram_dict = {
"linear": variogram_models.linear_variogram_model,
"power": variogram_models.power_variogram_model,
"gaussian": variogram_models.gaussian_variogram_model,
"spherical": variogram_models.spherical_variogram_model,
"exponential": variogram_models.exponential_variogram_model,
"hole-effect": variogram_models.hole_effect_variogram_model,
}
def __init__(
self,
x,
y,
z,
val,
variogram_model="linear",
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling_y=1.0,
anisotropy_scaling_z=1.0,
anisotropy_angle_x=0.0,
anisotropy_angle_y=0.0,
anisotropy_angle_z=0.0,
verbose=False,
enable_plotting=False,
exact_values=True,
pseudo_inv=False,
pseudo_inv_type="pinv",
):
# config the pseudo inverse
self.pseudo_inv = bool(pseudo_inv)
self.pseudo_inv_type = str(pseudo_inv_type)
if self.pseudo_inv_type not in P_INV:
raise ValueError("pseudo inv type not valid: " + str(pseudo_inv_type))
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
if not isinstance(exact_values, bool):
raise ValueError("exact_values has to be boolean True or False")
self.exact_values = exact_values
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
validate_gstools(self.model)
if self.model.field_dim < 3:
raise ValueError("GSTools: model dim is not 3")
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling_y = self.model.pykrige_anis_y
anisotropy_scaling_z = self.model.pykrige_anis_z
anisotropy_angle_x = self.model.pykrige_angle_x
anisotropy_angle_y = self.model.pykrige_angle_y
anisotropy_angle_z = self.model.pykrige_angle_z
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
# Code assumes 1D input arrays. Ensures that any extraneous dimensions
# don't get in the way. Copies are created to avoid any problems with
# referencing the original passed arguments.
self.X_ORIG = np.atleast_1d(
np.squeeze(np.array(x, copy=True, dtype=np.float64))
)
self.Y_ORIG = np.atleast_1d(
np.squeeze(np.array(y, copy=True, dtype=np.float64))
)
self.Z_ORIG = np.atleast_1d(
np.squeeze(np.array(z, copy=True, dtype=np.float64))
)
self.VALUES = np.atleast_1d(
np.squeeze(np.array(val, copy=True, dtype=np.float64))
)
self.verbose = verbose
self.enable_plotting = enable_plotting
if self.enable_plotting and self.verbose:
print("Plotting Enabled\n")
self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG)) / 2.0
self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG)) / 2.0
self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG)) / 2.0
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
if self.verbose:
print("Adjusting data for anisotropy...")
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z],
).T
if self.verbose:
print("Initializing variogram model...")
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
"euclidean",
)
if self.verbose:
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_function,
self.variogram_model_parameters,
"euclidean",
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
def update_variogram_model(
self,
variogram_model,
variogram_parameters=None,
variogram_function=None,
nlags=6,
weight=False,
anisotropy_scaling_y=1.0,
anisotropy_scaling_z=1.0,
anisotropy_angle_x=0.0,
anisotropy_angle_y=0.0,
anisotropy_angle_z=0.0,
):
"""Changes the variogram model and variogram parameters for
the kriging system.
Parameters
----------
variogram_model : str or GSTools CovModel
May be any of the variogram models listed above.
May also be 'custom', in which case variogram_parameters and
variogram_function must be specified.
You can also use a
`GSTools <https://github.com/GeoStat-Framework/GSTools>`_ CovModel.
variogram_parameters : list or dict, optional
List or dict of variogram model parameters, as explained above.
If not provided, a best fit model will be calculated as
described above.
variogram_function : callable, optional
A callable function that must be provided if variogram_model is
specified as 'custom'. See above for more information.
nlags : int, optional
Number of averaging bins for the semivariogram. Default is 6.
weight : bool, optional
Flag that specifies if semivariance at smaller lags should be
weighted more heavily when automatically calculating
variogram model. See above for more information. True indicates
that weights will be applied. Default is False.
anisotropy_scaling_y : float, optional
Scalar stretching value to take into account anisotropy
in y-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_scaling_z : float, optional
Scalar stretching value to take into account anisotropy
in z-direction. Default is 1 (effectively no stretching).
See above for more information.
anisotropy_angle_x : float, optional
Angle (in degrees) by which to rotate coordinate system about
the x axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_y : float, optional
Angle (in degrees) by which to rotate coordinate system about
the y axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
anisotropy_angle_z : float, optional
Angle (in degrees) by which to rotate coordinate system about
the z axis in order to take into account anisotropy.
Default is 0 (no rotation). See above for more information.
"""
# set up variogram model and parameters...
self.variogram_model = variogram_model
self.model = None
# check if a GSTools covariance model is given
if hasattr(self.variogram_model, "pykrige_kwargs"):
# save the model in the class
self.model = self.variogram_model
validate_gstools(self.model)
if self.model.field_dim < 3:
raise ValueError("GSTools: model dim is not 3")
self.variogram_model = "custom"
variogram_function = self.model.pykrige_vario
variogram_parameters = []
anisotropy_scaling_y = self.model.pykrige_anis_y
anisotropy_scaling_z = self.model.pykrige_anis_z
anisotropy_angle_x = self.model.pykrige_angle_x
anisotropy_angle_y = self.model.pykrige_angle_y
anisotropy_angle_z = self.model.pykrige_angle_z
if (
self.variogram_model not in self.variogram_dict.keys()
and self.variogram_model != "custom"
):
raise ValueError(
"Specified variogram model '%s' is not supported." % variogram_model
)
elif self.variogram_model == "custom":
if variogram_function is None or not callable(variogram_function):
raise ValueError(
"Must specify callable function for custom variogram model."
)
else:
self.variogram_function = variogram_function
else:
self.variogram_function = self.variogram_dict[self.variogram_model]
if (
anisotropy_scaling_y != self.anisotropy_scaling_y
or anisotropy_scaling_z != self.anisotropy_scaling_z
or anisotropy_angle_x != self.anisotropy_angle_x
or anisotropy_angle_y != self.anisotropy_angle_y
or anisotropy_angle_z != self.anisotropy_angle_z
):
if self.verbose:
print("Adjusting data for anisotropy...")
self.anisotropy_scaling_y = anisotropy_scaling_y
self.anisotropy_scaling_z = anisotropy_scaling_z
self.anisotropy_angle_x = anisotropy_angle_x
self.anisotropy_angle_y = anisotropy_angle_y
self.anisotropy_angle_z = anisotropy_angle_z
self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = _adjust_for_anisotropy(
np.vstack((self.X_ORIG, self.Y_ORIG, self.Z_ORIG)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[
self.anisotropy_angle_x,
self.anisotropy_angle_y,
self.anisotropy_angle_z,
],
).T
if self.verbose:
print("Updating variogram mode...")
vp_temp = _make_variogram_parameter_list(
self.variogram_model, variogram_parameters
)
(
self.lags,
self.semivariance,
self.variogram_model_parameters,
) = _initialize_variogram_model(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_model,
vp_temp,
self.variogram_function,
nlags,
weight,
"euclidean",
)
if self.verbose:
if self.variogram_model == "linear":
print("Using '%s' Variogram Model" % "linear")
print("Slope:", self.variogram_model_parameters[0])
print("Nugget:", self.variogram_model_parameters[1], "\n")
elif self.variogram_model == "power":
print("Using '%s' Variogram Model" % "power")
print("Scale:", self.variogram_model_parameters[0])
print("Exponent:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
elif self.variogram_model == "custom":
print("Using Custom Variogram Model")
else:
print("Using '%s' Variogram Model" % self.variogram_model)
print("Partial Sill:", self.variogram_model_parameters[0])
print(
"Full Sill:",
self.variogram_model_parameters[0]
+ self.variogram_model_parameters[2],
)
print("Range:", self.variogram_model_parameters[1])
print("Nugget:", self.variogram_model_parameters[2], "\n")
if self.enable_plotting:
self.display_variogram_model()
if self.verbose:
print("Calculating statistics on variogram model fit...")
self.delta, self.sigma, self.epsilon = _find_statistics(
np.vstack((self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED)).T,
self.VALUES,
self.variogram_function,
self.variogram_model_parameters,
"euclidean",
self.pseudo_inv,
)
self.Q1 = core.calcQ1(self.epsilon)
self.Q2 = core.calcQ2(self.epsilon)
self.cR = core.calc_cR(self.Q2, self.sigma)
if self.verbose:
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR, "\n")
def display_variogram_model(self):
"""Displays variogram model with the actual binned data."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(self.lags, self.semivariance, "r*")
ax.plot(
self.lags,
self.variogram_function(self.variogram_model_parameters, self.lags),
"k-",
)
plt.show()
def switch_verbose(self):
"""Allows user to switch code talk-back on/off. Takes no arguments."""
self.verbose = not self.verbose
def switch_plotting(self):
"""Allows user to switch plot display on/off. Takes no arguments."""
self.enable_plotting = not self.enable_plotting
def get_epsilon_residuals(self):
"""Returns the epsilon residuals for the variogram fit."""
return self.epsilon
def plot_epsilon_residuals(self):
"""Plots the epsilon residuals for the variogram fit."""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(range(self.epsilon.size), self.epsilon, c="k", marker="*")
ax.axhline(y=0.0)
plt.show()
def get_statistics(self):
"""Returns the Q1, Q2, and cR statistics for the
variogram fit (in that order). No arguments.
"""
return self.Q1, self.Q2, self.cR
def print_statistics(self):
"""Prints out the Q1, Q2, and cR statistics for the variogram fit.
NOTE that ideally Q1 is close to zero, Q2 is close to 1,
and cR is as small as possible.
"""
print("Q1 =", self.Q1)
print("Q2 =", self.Q2)
print("cR =", self.cR)
def _get_kriging_matrix(self, n):
"""Assembles the kriging matrix."""
xyz = np.concatenate(
(
self.X_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.Z_ADJUSTED[:, np.newaxis],
),
axis=1,
)
d = cdist(xyz, xyz, "euclidean")
a = np.zeros((n + 1, n + 1))
a[:n, :n] = -self.variogram_function(self.variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
return a
def _exec_vector(self, a, bd, mask):
"""Solves the kriging system as a vectorized operation. This method
can take a lot of memory for large grids and/or large datasets."""
npt = bd.shape[0]
n = self.X_ADJUSTED.shape[0]
zero_index = None
zero_value = False
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
b = np.zeros((npt, n + 1, 1))
b[:, :n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], zero_index[1], 0] = 0.0
b[:, n, 0] = 1.0
if (~mask).any():
mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n + 1, axis=1)
b = np.ma.array(b, mask=mask_b)
x = np.dot(a_inv, b.reshape((npt, n + 1)).T).reshape((1, n + 1, npt)).T
kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)
sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)
return kvalues, sigmasq
def _exec_loop(self, a, bd_all, mask):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
# use the desired method to invert the kriging matrix
if self.pseudo_inv:
a_inv = P_INV[self.pseudo_inv_type](a)
else:
a_inv = scipy.linalg.inv(a)
        # Note that this is the same thing as range(npt) if mask is not defined;
        # otherwise it takes the non-masked elements.
        for j in np.nonzero(~mask)[0]:
            bd = bd_all[j]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n + 1, 1))
b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = np.dot(a_inv, b)
kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
return kvalues, sigmasq
def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx):
"""Solves the kriging system by looping over all specified points.
Uses only a certain number of closest points. Not very memory intensive,
but the loop is done in pure Python.
"""
import scipy.linalg.lapack
npt = bd_all.shape[0]
n = bd_idx.shape[1]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
for i in np.nonzero(~mask)[0]:
b_selector = bd_idx[i]
bd = bd_all[i]
a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1])))
a = a_all[a_selector[:, None], a_selector]
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n + 1, 1))
b[:n, 0] = -self.variogram_function(self.variogram_model_parameters, bd)
if zero_value and self.exact_values:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = scipy.linalg.solve(a, b)
kvalues[i] = x[:n, 0].dot(self.VALUES[b_selector])
sigmasq[i] = -x[:, 0].dot(b[:, 0])
return kvalues, sigmasq
def execute(
self,
style,
xpoints,
ypoints,
zpoints,
mask=None,
backend="vectorized",
n_closest_points=None,
):
"""Calculates a kriged grid and the associated variance.
        This is the method that performs the main kriging calculation.
        By default, measurements (i.e., the data values) are considered
        'exact'. This means that, when a specified coordinate for
        interpolation is exactly the same as one of the data points, the
        variogram evaluated at the point is forced to be zero, and the
        diagonal of the kriging matrix is likewise forced to be zero. In
        forcing the variogram evaluated at data points to be zero, we are
        effectively saying that there is no variance at that point
        (no uncertainty, so the value is 'exact').
        The 'exact_values' boolean flag supplied at instantiation controls
        this behavior. Setting the flag to False indicates that the
        variogram should not be forced to be zero at zero distance (i.e.,
        when evaluated at data points); instead, the uncertainty at such a
        point is governed by the nugget, since the variogram value at zero
        separation is no longer overridden.
Parameters
----------
style : str
Specifies how to treat input kriging points.
Specifying 'grid' treats xpoints, ypoints, and zpoints as arrays of
x, y, and z coordinates that define a rectangular grid.
Specifying 'points' treats xpoints, ypoints, and zpoints as arrays
that provide coordinates at which to solve the kriging system.
Specifying 'masked' treats xpoints, ypoints, and zpoints as arrays
of x, y, and z coordinates that define a rectangular grid and uses
mask to only evaluate specific points in the grid.
xpoints : array_like, shape (N,) or (N, 1)
            If style is specified as 'grid' or 'masked', x-coordinates of
LxMxN grid. If style is specified as 'points', x-coordinates of
specific points at which to solve kriging system.
ypoints : array-like, shape (M,) or (M, 1)
If style is specified as 'grid' or 'masked', y-coordinates of
LxMxN grid. If style is specified as 'points', y-coordinates of
specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
zpoints : array-like, shape (L,) or (L, 1)
If style is specified as 'grid' or 'masked', z-coordinates of
LxMxN grid. If style is specified as 'points', z-coordinates of
specific points at which to solve kriging system.
Note that in this case, xpoints, ypoints, and zpoints must have the
same dimensions (i.e., L = M = N).
mask : boolean array, shape (L, M, N), optional
Specifies the points in the rectangular grid defined by xpoints,
ypoints, zpoints that are to be excluded in the
kriging calculations. Must be provided if style is specified
as 'masked'. False indicates that the point should not be masked,
so the kriging system will be solved at the point.
True indicates that the point should be masked, so the kriging
            system will not be solved at the point.
backend : str, optional
Specifies which approach to use in kriging. Specifying 'vectorized'
will solve the entire kriging problem at once in a
vectorized operation. This approach is faster but also can consume a
significant amount of memory for large grids and/or large datasets.
Specifying 'loop' will loop through each point at which the kriging
system is to be solved. This approach is slower but also less
memory-intensive. Default is 'vectorized'.
n_closest_points : int, optional
For kriging with a moving window, specifies the number of nearby
points to use in the calculation. This can speed up the calculation
for large datasets, but should be used with caution.
As Kitanidis notes, kriging with a moving window can produce
unexpected oddities if the variogram model is not carefully chosen.
Returns
-------
kvalues : ndarray, shape (L, M, N) or (N, 1)
Interpolated values of specified grid or at the specified set
of points. If style was specified as 'masked', kvalues will be a
numpy masked array.
sigmasq : ndarray, shape (L, M, N) or (N, 1)
Variance at specified grid points or at the specified set of points.
If style was specified as 'masked', sigmasq will be a numpy
masked array.
"""
if self.verbose:
print("Executing Ordinary Kriging...\n")
if style != "grid" and style != "masked" and style != "points":
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True)))
ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True)))
zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True)))
n = self.X_ADJUSTED.shape[0]
nx = xpts.size
ny = ypts.size
nz = zpts.size
a = self._get_kriging_matrix(n)
if style in ["grid", "masked"]:
if style == "masked":
if mask is None:
raise IOError(
"Must specify boolean masking array when style is 'masked'."
)
if mask.ndim != 3:
raise ValueError("Mask is not three-dimensional.")
if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx:
if (
mask.shape[0] == nx
and mask.shape[2] == nz
and mask.shape[1] == ny
):
mask = mask.swapaxes(0, 2)
else:
raise ValueError(
"Mask dimensions do not match specified grid dimensions."
)
mask = mask.flatten()
npt = nz * ny * nx
grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing="ij")
xpts = grid_x.flatten()
ypts = grid_y.flatten()
zpts = grid_z.flatten()
elif style == "points":
            if xpts.size != ypts.size or ypts.size != zpts.size:
raise ValueError(
"xpoints, ypoints, and zpoints must have "
"same dimensions when treated as listing "
"discrete points."
)
npt = nx
else:
raise ValueError("style argument must be 'grid', 'points', or 'masked'")
xpts, ypts, zpts = _adjust_for_anisotropy(
np.vstack((xpts, ypts, zpts)).T,
[self.XCENTER, self.YCENTER, self.ZCENTER],
[self.anisotropy_scaling_y, self.anisotropy_scaling_z],
[self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z],
).T
if style != "masked":
mask = np.zeros(npt, dtype="bool")
xyz_points = np.concatenate(
(zpts[:, np.newaxis], ypts[:, np.newaxis], xpts[:, np.newaxis]), axis=1
)
xyz_data = np.concatenate(
(
self.Z_ADJUSTED[:, np.newaxis],
self.Y_ADJUSTED[:, np.newaxis],
self.X_ADJUSTED[:, np.newaxis],
),
axis=1,
)
bd = cdist(xyz_points, xyz_data, "euclidean")
if n_closest_points is not None:
from scipy.spatial import cKDTree
tree = cKDTree(xyz_data)
bd, bd_idx = tree.query(xyz_points, k=n_closest_points, eps=0.0)
if backend == "loop":
kvalues, sigmasq = self._exec_loop_moving_window(a, bd, mask, bd_idx)
else:
raise ValueError(
"Specified backend '{}' not supported "
"for moving window.".format(backend)
)
else:
if backend == "vectorized":
kvalues, sigmasq = self._exec_vector(a, bd, mask)
elif backend == "loop":
kvalues, sigmasq = self._exec_loop(a, bd, mask)
else:
raise ValueError(
"Specified backend {} is not supported for "
"3D ordinary kriging.".format(backend)
)
if style == "masked":
kvalues = np.ma.array(kvalues, mask=mask)
sigmasq = np.ma.array(sigmasq, mask=mask)
if style in ["masked", "grid"]:
kvalues = kvalues.reshape((nz, ny, nx))
sigmasq = sigmasq.reshape((nz, ny, nx))
return kvalues, sigmasq
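# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative values only): 3D ordinary kriging on
# synthetic data, assuming numpy is available.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(42)
    x, y, z = rng.random((3, 25)) * 5.0
    val = np.sin(x) + 0.3 * y - 0.1 * z + rng.normal(scale=0.05, size=25)
    ok3d = OrdinaryKriging3D(x, y, z, val, variogram_model="exponential")
    gridx = gridy = gridz = np.linspace(0.0, 5.0, 4)
    # 'vectorized' solves all grid points at once; 'loop' trades speed for memory.
    kvals, ss = ok3d.execute("grid", gridx, gridy, gridz, backend="vectorized")
    print(kvals.shape)  # (4, 4, 4)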
| 39,816 | 41.676313 | 88 | py |
PyKrige | PyKrige-main/src/pykrige/rk.py | # coding: utf-8
"""Regression Kriging."""
from pykrige.compat import Krige, check_sklearn_model, validate_sklearn
validate_sklearn()
from sklearn.metrics import r2_score
from sklearn.svm import SVR
class RegressionKriging:
"""
An implementation of Regression-Kriging.
As described here:
https://en.wikipedia.org/wiki/Regression-Kriging
Parameters
----------
regression_model: machine learning model instance from sklearn
method: str, optional
type of kriging to be performed
variogram_model: str, optional
variogram model to be used during Kriging
n_closest_points: int
number of closest points to be used during Ordinary Kriging
nlags: int
see OK/UK class description
weight: bool
see OK/UK class description
verbose: bool
see OK/UK class description
exact_values : bool
see OK/UK class description
variogram_parameters : list or dict
see OK/UK class description
variogram_function : callable
see OK/UK class description
anisotropy_scaling : tuple
single value for 2D (UK/OK) and two values in 3D (UK3D/OK3D)
anisotropy_angle : tuple
single value for 2D (UK/OK) and three values in 3D (UK3D/OK3D)
enable_statistics : bool
see OK class description
coordinates_type : str
see OK/UK class description
drift_terms : list of strings
see UK/UK3D class description
point_drift : array_like
see UK class description
ext_drift_grid : tuple
Holding the three values external_drift, external_drift_x and
external_drift_z for the UK class
functional_drift : list of callable
see UK/UK3D class description
"""
def __init__(
self,
regression_model=SVR(),
method="ordinary",
variogram_model="linear",
n_closest_points=10,
nlags=6,
weight=False,
verbose=False,
exact_values=True,
pseudo_inv=False,
pseudo_inv_type="pinv",
variogram_parameters=None,
variogram_function=None,
anisotropy_scaling=(1.0, 1.0),
anisotropy_angle=(0.0, 0.0, 0.0),
enable_statistics=False,
coordinates_type="euclidean",
drift_terms=None,
point_drift=None,
ext_drift_grid=(None, None, None),
functional_drift=None,
):
check_sklearn_model(regression_model)
self.regression_model = regression_model
self.n_closest_points = n_closest_points
self.krige = Krige(
method=method,
variogram_model=variogram_model,
nlags=nlags,
weight=weight,
n_closest_points=n_closest_points,
verbose=verbose,
exact_values=exact_values,
pseudo_inv=pseudo_inv,
pseudo_inv_type=pseudo_inv_type,
variogram_parameters=variogram_parameters,
variogram_function=variogram_function,
anisotropy_scaling=anisotropy_scaling,
anisotropy_angle=anisotropy_angle,
enable_statistics=enable_statistics,
coordinates_type=coordinates_type,
drift_terms=drift_terms,
point_drift=point_drift,
ext_drift_grid=ext_drift_grid,
functional_drift=functional_drift,
)
def fit(self, p, x, y):
"""
Fit the regression method and also Krige the residual.
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for regression
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example 2d regression kriging.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
"""
self.regression_model.fit(p, y)
ml_pred = self.regression_model.predict(p)
print("Finished learning regression model")
# residual=y-ml_pred
self.krige.fit(x=x, y=y - ml_pred)
print("Finished kriging residuals")
def predict(self, p, x, **kwargs):
"""
Predict.
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for regression
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
Returns
-------
pred: ndarray
The expected value of ys for the query inputs, of shape (Ns,).
"""
return self.krige_residual(x, **kwargs) + self.regression_model.predict(p)
def krige_residual(self, x, **kwargs):
"""
Calculate the residuals.
Parameters
----------
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example.
Returns
-------
residual: ndarray
kriged residual values
"""
return self.krige.predict(x, **kwargs)
def score(self, p, x, y, sample_weight=None, **kwargs):
"""
Overloading default regression score method.
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for regression
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
"""
return r2_score(
y_pred=self.predict(p, x, **kwargs), y_true=y, sample_weight=sample_weight
)
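# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative values only): a linear regression model
# combined with residual kriging on synthetic 2D data, assuming numpy and
# scikit-learn are available.
if __name__ == "__main__":
    import numpy as np
    from sklearn.linear_model import LinearRegression

    rng = np.random.default_rng(1)
    lonlat = rng.random((100, 2)) * 10.0  # (Ns, 2) spatial coordinates
    p = rng.random((100, 3))  # (Ns, d) predictor variables
    y = p @ np.array([1.0, -2.0, 0.5]) + 0.2 * lonlat[:, 0]
    y += rng.normal(scale=0.1, size=100)
    rk = RegressionKriging(regression_model=LinearRegression(), n_closest_points=10)
    rk.fit(p, lonlat, y)
    print("R^2:", rk.score(p, lonlat, y))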
| 5,982 | 30.994652 | 86 | py |
PyKrige | PyKrige-main/src/pykrige/variogram_models.py | # coding: utf-8
"""
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Function definitions for variogram models. In each function, m is a list of
defining parameters and d is an array of the distance values at which to
calculate the variogram model.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
Copyright (c) 2015-2020, PyKrige Developers
"""
import numpy as np
def linear_variogram_model(m, d):
"""Linear model, m is [slope, nugget]"""
slope = float(m[0])
nugget = float(m[1])
return slope * d + nugget
def power_variogram_model(m, d):
"""Power model, m is [scale, exponent, nugget]"""
scale = float(m[0])
exponent = float(m[1])
nugget = float(m[2])
return scale * d**exponent + nugget
def gaussian_variogram_model(m, d):
"""Gaussian model, m is [psill, range, nugget]"""
psill = float(m[0])
range_ = float(m[1])
nugget = float(m[2])
return psill * (1.0 - np.exp(-(d**2.0) / (range_ * 4.0 / 7.0) ** 2.0)) + nugget
def exponential_variogram_model(m, d):
"""Exponential model, m is [psill, range, nugget]"""
psill = float(m[0])
range_ = float(m[1])
nugget = float(m[2])
return psill * (1.0 - np.exp(-d / (range_ / 3.0))) + nugget
def spherical_variogram_model(m, d):
"""Spherical model, m is [psill, range, nugget]"""
psill = float(m[0])
range_ = float(m[1])
nugget = float(m[2])
return np.piecewise(
d,
[d <= range_, d > range_],
[
lambda x: psill
* ((3.0 * x) / (2.0 * range_) - (x**3.0) / (2.0 * range_**3.0))
+ nugget,
psill + nugget,
],
)
def hole_effect_variogram_model(m, d):
"""Hole Effect model, m is [psill, range, nugget]"""
psill = float(m[0])
range_ = float(m[1])
nugget = float(m[2])
return (
psill * (1.0 - (1.0 - d / (range_ / 3.0)) * np.exp(-d / (range_ / 3.0)))
+ nugget
)
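# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative parameters): evaluating a few of the
# models above on a common set of lag distances.
if __name__ == "__main__":
    d = np.linspace(0.0, 30.0, 7)  # lag distances
    psill, range_, nugget = 2.0, 10.0, 0.5
    print(spherical_variogram_model([psill, range_, nugget], d))
    print(exponential_variogram_model([psill, range_, nugget], d))
    print(gaussian_variogram_model([psill, range_, nugget], d))
    # All three level off near psill + nugget (= 2.5) once d exceeds the range.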
| 2,092 | 24.52439 | 83 | py |
PyKrige | PyKrige-main/src/pykrige/ck.py | # coding: utf-8
"""Classification Kriging."""
import numpy as np
from pykrige.compat import Krige, check_sklearn_model, validate_sklearn
validate_sklearn()
from scipy.linalg import helmert
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import SVC
class ClassificationKriging:
"""
    An implementation of Simplicial Indicator Kriging applied to
    ilr-transformed classification residuals.
Parameters
----------
classification_model: machine learning model instance from sklearn
method: str, optional
type of kriging to be performed
variogram_model: str, optional
variogram model to be used during Kriging
n_closest_points: int
number of closest points to be used during Ordinary Kriging
nlags: int
see OK/UK class description
weight: bool
see OK/UK class description
verbose: bool
see OK/UK class description
exact_values : bool
see OK/UK class description
variogram_parameters : list or dict
see OK/UK class description
variogram_function : callable
see OK/UK class description
anisotropy_scaling : tuple
single value for 2D (UK/OK) and two values in 3D (UK3D/OK3D)
anisotropy_angle : tuple
single value for 2D (UK/OK) and three values in 3D (UK3D/OK3D)
enable_statistics : bool
see OK class description
coordinates_type : str
see OK/UK class description
drift_terms : list of strings
see UK/UK3D class description
point_drift : array_like
see UK class description
ext_drift_grid : tuple
Holding the three values external_drift, external_drift_x and
external_drift_z for the UK class
functional_drift : list of callable
see UK/UK3D class description
"""
def __init__(
self,
classification_model=SVC(),
method="ordinary",
variogram_model="linear",
n_closest_points=10,
nlags=6,
weight=False,
verbose=False,
exact_values=True,
pseudo_inv=False,
pseudo_inv_type="pinv",
variogram_parameters=None,
variogram_function=None,
anisotropy_scaling=(1.0, 1.0),
anisotropy_angle=(0.0, 0.0, 0.0),
enable_statistics=False,
coordinates_type="euclidean",
drift_terms=None,
point_drift=None,
ext_drift_grid=(None, None, None),
functional_drift=None,
):
check_sklearn_model(classification_model, task="classification")
self.classification_model = classification_model
self.n_closest_points = n_closest_points
self._kriging_kwargs = dict(
method=method,
variogram_model=variogram_model,
nlags=nlags,
weight=weight,
n_closest_points=n_closest_points,
verbose=verbose,
exact_values=exact_values,
pseudo_inv=pseudo_inv,
pseudo_inv_type=pseudo_inv_type,
variogram_parameters=variogram_parameters,
variogram_function=variogram_function,
anisotropy_scaling=anisotropy_scaling,
anisotropy_angle=anisotropy_angle,
enable_statistics=enable_statistics,
coordinates_type=coordinates_type,
drift_terms=drift_terms,
point_drift=point_drift,
ext_drift_grid=ext_drift_grid,
functional_drift=functional_drift,
)
def fit(self, p, x, y):
"""
Fit the classification method and also krige the residual.
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for classification
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example 2d classification kriging.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
"""
self.classification_model.fit(p, y.ravel())
print("Finished learning classification model")
self.classes_ = self.classification_model.classes_
self.krige = []
for i in range(len(self.classes_) - 1):
self.krige.append(Krige(**self._kriging_kwargs))
ml_pred = self.classification_model.predict_proba(p)
ml_pred_ilr = ilr_transformation(ml_pred)
self.onehotencode = OneHotEncoder(categories=[self.classes_])
y_ohe = np.array(self.onehotencode.fit_transform(y).todense())
y_ohe_ilr = ilr_transformation(y_ohe)
for i in range(len(self.classes_) - 1):
self.krige[i].fit(x=x, y=y_ohe_ilr[:, i] - ml_pred_ilr[:, i])
print("Finished kriging residuals")
def predict(self, p, x, **kwargs):
"""
Predict.
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for classification
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
Returns
-------
pred: ndarray
The expected value of ys for the query inputs, of shape (Ns,).
"""
ml_pred = self.classification_model.predict_proba(p)
ml_pred_ilr = ilr_transformation(ml_pred)
pred_proba_ilr = self.krige_residual(x, **kwargs) + ml_pred_ilr
pred_proba = inverse_ilr_transformation(pred_proba_ilr)
return np.argmax(pred_proba, axis=1)
def krige_residual(self, x, **kwargs):
"""
Calculate the residuals.
Parameters
----------
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example.
Returns
-------
residual: ndarray
kriged residual values
"""
krig_pred = [
self.krige[i].predict(x=x, **kwargs) for i in range(len(self.classes_) - 1)
]
return np.vstack(krig_pred).T
def score(self, p, x, y, sample_weight=None, **kwargs):
"""
Overloading default classification score method.
Parameters
----------
p: ndarray
(Ns, d) array of predictor variables (Ns samples, d dimensions)
for classification
x: ndarray
ndarray of (x, y) points. Needs to be a (Ns, 2) array
corresponding to the lon/lat, for example.
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (Ns, )
"""
return accuracy_score(
y_pred=self.predict(p, x, **kwargs), y_true=y, sample_weight=sample_weight
)
def closure(data, k=1.0):
"""Apply closure to data, sample-wise.
Adapted from https://github.com/ofgulban/compoda.
Parameters
----------
data : 2d numpy array, shape [n_samples, n_measurements]
Data to be closed to a certain constant. Do not forget to deal with
zeros in the data before this operation.
k : float, positive
Sum of the measurements will be equal to this number.
Returns
-------
data : 2d numpy array, shape [n_samples, n_measurements]
Closed data.
Reference
---------
[1] Pawlowsky-Glahn, V., Egozcue, J. J., & Tolosana-Delgado, R.
(2015). Modelling and Analysis of Compositional Data, pg. 9.
Chichester, UK: John Wiley & Sons, Ltd.
DOI: 10.1002/9781119003144
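    Examples
    --------
    A minimal usage sketch; the input values below are illustrative and not
    taken from the reference::
        import numpy as np
        comp = closure(np.array([[1.0, 3.0], [2.0, 2.0]]))
        # each row of ``comp`` now sums to 1: [[0.25, 0.75], [0.5, 0.5]]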
"""
return k * data / np.sum(data, axis=1)[:, np.newaxis]
def ilr_transformation(data):
"""Isometric logratio transformation (not vectorized).
Adapted from https://github.com/ofgulban/compoda.
Parameters
----------
data : 2d numpy array, shape [n_samples, n_coordinates]
Barycentric coordinates (closed) in simplex space.
Returns
-------
out : 2d numpy array, shape [n_samples, n_coordinates-1]
Coordinates in real space.
Reference
---------
[1] Pawlowsky-Glahn, V., Egozcue, J. J., & Tolosana-Delgado, R.
(2015). Modelling and Analysis of Compositional Data, pg. 37.
Chichester, UK: John Wiley & Sons, Ltd.
DOI: 10.1002/9781119003144
"""
data = np.maximum(data, np.finfo(float).eps)
return np.einsum("ij,jk->ik", np.log(data), -helmert(data.shape[1]).T)
def inverse_ilr_transformation(data):
"""Inverse isometric logratio transformation (not vectorized).
Adapted from https://github.com/ofgulban/compoda.
Parameters
----------
data : 2d numpy array, shape [n_samples, n_coordinates]
Isometric log-ratio transformed coordinates in real space.
Returns
-------
out : 2d numpy array, shape [n_samples, n_coordinates+1]
Barycentric coordinates (closed) in simplex space.
Reference
---------
[1] Pawlowsky-Glahn, V., Egozcue, J. J., & Tolosana-Delgado, R.
(2015). Modelling and Analysis of Compositional Data, pg. 37.
Chichester, UK: John Wiley & Sons, Ltd.
DOI: 10.1002/9781119003144
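    Examples
    --------
    A minimal sketch of the round trip with ``ilr_transformation``; the
    composition below is an illustrative choice::
        import numpy as np
        comp = closure(np.array([[0.2, 0.3, 0.5]]))
        real = ilr_transformation(comp)          # shape (1, 2)
        back = inverse_ilr_transformation(real)  # approximately equal to comp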
"""
return closure(np.exp(np.einsum("ij,jk->ik", data, -helmert(data.shape[1] + 1))))
| 9,458 | 31.393836 | 106 | py |
PyKrige | PyKrige-main/src/pykrige/__init__.py | """
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Kriging toolkit for Python.
ok: Contains class OrdinaryKriging, which is a convenience class for easy
access to 2D ordinary kriging.
uk: Contains class UniversalKriging, which provides more control over
2D kriging by utilizing drift terms. Supported drift terms currently
include point-logarithmic, regional linear, and external z-scalar.
Generic functions of the spatial coordinates may also be supplied to
provide drift terms, or the point-by-point values of a drift term
may be supplied.
ok3d: Contains class OrdinaryKriging3D, which provides support for
3D ordinary kriging.
uk3d: Contains class UniversalKriging3D, which provide support for
3D universal kriging. A regional linear drift is the only drift term
currently supported, but generic drift functions or point-by-point
values of a drift term may also be supplied.
kriging_tools: Contains a set of functions to work with *.asc files.
variogram_models: Contains the definitions for the implemented variogram
models. Note that the utilized formulas are as presented in Kitanidis,
so the exact definition of the range (specifically, the associated
scaling of that value) may differ slightly from other sources.
core: Contains the backbone functions of the package that are called by
    the various kriging classes. The functions were consolidated here
in order to reduce redundancy in the code.
test: Contains the test script.
References
----------
.. [1] P.K. Kitanidis, Introduction to Geostatistics: Applications in
Hydrogeology, (Cambridge University Press, 1997) 272 p.
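Examples
--------
A minimal sketch of 2D ordinary kriging; the sample points mirror the small
2D dataset used in the test suite and are otherwise arbitrary::
    import numpy as np
    from pykrige.ok import OrdinaryKriging
    data = np.array([[0.3, 1.2, 0.47], [1.9, 0.6, 0.56], [1.1, 3.2, 0.74],
                     [3.3, 4.4, 1.47], [4.7, 3.8, 1.74]])
    ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
    z, ss = ok.execute("grid", np.arange(0.0, 5.5, 0.5), np.arange(0.0, 5.5, 0.5))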
Copyright (c) 2015-2020, PyKrige Developers
"""
from . import kriging_tools as kt # noqa
from .ok import OrdinaryKriging # noqa
from .ok3d import OrdinaryKriging3D # noqa
from .uk import UniversalKriging # noqa
from .uk3d import UniversalKriging3D # noqa
try:
from pykrige._version import __version__
except ImportError: # pragma: nocover
# package is not installed
__version__ = "0.0.0.dev0"
__author__ = "Benjamin S. Murphy"
__all__ = ["__version__"]
__all__ += ["kt", "ok", "uk", "ok3d", "uk3d", "kriging_tools"]
__all__ += ["OrdinaryKriging"]
__all__ += ["UniversalKriging"]
__all__ += ["OrdinaryKriging3D"]
__all__ += ["UniversalKriging3D"]
| 2,389 | 35.212121 | 76 | py |
PyKrige | PyKrige-main/src/pykrige/compat.py | # coding: utf-8
# pylint: disable= invalid-name, unused-import
"""For compatibility."""
from pykrige.ok import OrdinaryKriging
from pykrige.ok3d import OrdinaryKriging3D
from pykrige.uk import UniversalKriging
from pykrige.uk3d import UniversalKriging3D
# sklearn
try:
# keep train_test_split here for backward compatibility
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.model_selection import train_test_split
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
train_test_split = None
class RegressorMixin:
"""Mock RegressorMixin."""
class ClassifierMixin:
"""Mock ClassifierMixin."""
class BaseEstimator:
"""Mock BaseEstimator."""
krige_methods = {
"ordinary": OrdinaryKriging,
"universal": UniversalKriging,
"ordinary3d": OrdinaryKriging3D,
"universal3d": UniversalKriging3D,
}
threed_krige = ("ordinary3d", "universal3d")
krige_methods_kws = {
"ordinary": [
"anisotropy_scaling",
"anisotropy_angle",
"enable_statistics",
"coordinates_type",
],
"universal": [
"anisotropy_scaling",
"anisotropy_angle",
"drift_terms",
"point_drift",
"external_drift",
"external_drift_x",
"external_drift_y",
"functional_drift",
],
"ordinary3d": [
"anisotropy_scaling_y",
"anisotropy_scaling_z",
"anisotropy_angle_x",
"anisotropy_angle_y",
"anisotropy_angle_z",
],
"universal3d": [
"anisotropy_scaling_y",
"anisotropy_scaling_z",
"anisotropy_angle_x",
"anisotropy_angle_y",
"anisotropy_angle_z",
"drift_terms",
"functional_drift",
],
}
class SklearnException(Exception):
"""Exception for missing scikit-learn."""
def validate_method(method):
"""Validate the kriging method in use."""
if method not in krige_methods.keys():
raise ValueError(
"Kriging method must be one of {}".format(krige_methods.keys())
)
def validate_sklearn():
"""Validate presence of scikit-learn."""
if not SKLEARN_INSTALLED:
raise SklearnException(
"sklearn needs to be installed in order to use this module"
)
class Krige(RegressorMixin, BaseEstimator):
"""
A scikit-learn wrapper class for Ordinary and Universal Kriging.
    This works with both GridSearchCV and RandomizedSearchCV for finding
    the best Krige parameter combination for a problem.
Parameters
----------
method: str, optional
type of kriging to be performed
variogram_model: str, optional
variogram model to be used during Kriging
nlags: int
see OK/UK class description
weight: bool
see OK/UK class description
n_closest_points: int
number of closest points to be used during Ordinary Kriging
verbose: bool
see OK/UK class description
exact_values : bool
see OK/UK class description
variogram_parameters : list or dict
see OK/UK class description
variogram_function : callable
see OK/UK class description
anisotropy_scaling : tuple
single value for 2D (UK/OK) and two values in 3D (UK3D/OK3D)
anisotropy_angle : tuple
single value for 2D (UK/OK) and three values in 3D (UK3D/OK3D)
enable_statistics : bool
see OK class description
coordinates_type : str
see OK/UK class description
drift_terms : list of strings
see UK/UK3D class description
point_drift : array_like
see UK class description
ext_drift_grid : tuple
        Holding the three values external_drift, external_drift_x and
        external_drift_y for the UK class
functional_drift : list of callable
see UK/UK3D class description
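    Examples
    --------
    A minimal sketch of a scikit-learn hyper-parameter search over this
    wrapper; the parameter grid and the random sample data are illustrative
    choices, not prescribed values::
        import numpy as np
        from sklearn.model_selection import GridSearchCV
        param_dict = {"method": ["ordinary", "universal"],
                      "variogram_model": ["linear", "spherical"]}
        estimator = GridSearchCV(Krige(), param_dict, verbose=True)
        X = 5.0 * np.random.rand(20, 2)   # (N, 2) points for 2d kriging
        y = np.random.rand(20)            # targets
        estimator.fit(X=X, y=y)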
"""
def __init__(
self,
method="ordinary",
variogram_model="linear",
nlags=6,
weight=False,
n_closest_points=10,
verbose=False,
exact_values=True,
pseudo_inv=False,
pseudo_inv_type="pinv",
variogram_parameters=None,
variogram_function=None,
anisotropy_scaling=(1.0, 1.0),
anisotropy_angle=(0.0, 0.0, 0.0),
enable_statistics=False,
coordinates_type="euclidean",
drift_terms=None,
point_drift=None,
ext_drift_grid=(None, None, None),
functional_drift=None,
):
validate_method(method)
self.variogram_model = variogram_model
self.variogram_parameters = variogram_parameters
self.variogram_function = variogram_function
self.nlags = nlags
self.weight = weight
self.verbose = verbose
self.exact_values = exact_values
self.pseudo_inv = pseudo_inv
self.pseudo_inv_type = pseudo_inv_type
self.anisotropy_scaling = anisotropy_scaling
self.anisotropy_angle = anisotropy_angle
self.enable_statistics = enable_statistics
self.coordinates_type = coordinates_type
self.drift_terms = drift_terms
self.point_drift = point_drift
self.ext_drift_grid = ext_drift_grid
self.functional_drift = functional_drift
self.model = None # not trained
self.n_closest_points = n_closest_points
self.method = method
def fit(self, x, y, *args, **kwargs):
"""
Fit the current model.
Parameters
----------
x: ndarray
array of Points, (x, y) pairs of shape (N, 2) for 2d kriging
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
y: ndarray
array of targets (N, )
"""
val_kw = "val" if self.method in threed_krige else "z"
setup = dict(
variogram_model=self.variogram_model,
variogram_parameters=self.variogram_parameters,
variogram_function=self.variogram_function,
nlags=self.nlags,
weight=self.weight,
verbose=self.verbose,
exact_values=self.exact_values,
pseudo_inv=self.pseudo_inv,
pseudo_inv_type=self.pseudo_inv_type,
)
add_setup = dict(
anisotropy_scaling=self.anisotropy_scaling[0],
anisotropy_angle=self.anisotropy_angle[0],
enable_statistics=self.enable_statistics,
coordinates_type=self.coordinates_type,
anisotropy_scaling_y=self.anisotropy_scaling[0],
anisotropy_scaling_z=self.anisotropy_scaling[1],
anisotropy_angle_x=self.anisotropy_angle[0],
anisotropy_angle_y=self.anisotropy_angle[1],
anisotropy_angle_z=self.anisotropy_angle[2],
drift_terms=self.drift_terms,
point_drift=self.point_drift,
external_drift=self.ext_drift_grid[0],
external_drift_x=self.ext_drift_grid[1],
external_drift_y=self.ext_drift_grid[2],
functional_drift=self.functional_drift,
)
for kw in krige_methods_kws[self.method]:
setup[kw] = add_setup[kw]
input_kw = self._dimensionality_check(x)
input_kw.update(setup)
input_kw[val_kw] = y
self.model = krige_methods[self.method](**input_kw)
def _dimensionality_check(self, x, ext=""):
if self.method in ("ordinary", "universal"):
if x.shape[1] != 2:
raise ValueError("2d krige can use only 2d points")
else:
return {"x" + ext: x[:, 0], "y" + ext: x[:, 1]}
if self.method in ("ordinary3d", "universal3d"):
if x.shape[1] != 3:
raise ValueError("3d krige can use only 3d points")
else:
return {
"x" + ext: x[:, 0],
"y" + ext: x[:, 1],
"z" + ext: x[:, 2],
}
def predict(self, x, *args, **kwargs):
"""
Predict.
Parameters
----------
x: ndarray
array of Points, (x, y) pairs of shape (N, 2) for 2d kriging
array of Points, (x, y, z) pairs of shape (N, 3) for 3d kriging
Returns
-------
Prediction array
"""
if not self.model:
raise Exception("Not trained. Train first")
points = self._dimensionality_check(x, ext="points")
return self.execute(points, *args, **kwargs)[0]
def execute(self, points, *args, **kwargs):
# TODO array of Points, (x, y) pairs of shape (N, 2)
"""
Execute.
Parameters
----------
        points: dict
            Coordinate arrays (from the dimensionality check) plus any
            keyword arguments forwarded to the wrapped kriging object's
            ``execute`` method.
Returns
-------
Prediction array
Variance array
"""
default_kw = dict(style="points", backend="loop")
default_kw.update(kwargs)
points.update(default_kw)
if isinstance(self.model, (OrdinaryKriging, OrdinaryKriging3D)):
points.update(dict(n_closest_points=self.n_closest_points))
else:
print("n_closest_points will be ignored for UniversalKriging")
prediction, variance = self.model.execute(**points)
return prediction, variance
def check_sklearn_model(model, task="regression"):
"""Check the sklearn method in use."""
if task == "regression":
if not (isinstance(model, BaseEstimator) and isinstance(model, RegressorMixin)):
raise RuntimeError(
"Needs to supply an instance of a scikit-learn regression class."
)
elif task == "classification":
if not (
isinstance(model, BaseEstimator) and isinstance(model, ClassifierMixin)
):
raise RuntimeError(
"Needs to supply an instance of a scikit-learn classification class."
)
| 9,889 | 31.11039 | 88 | py |
PyKrige | PyKrige-main/src/pykrige/kriging_tools.py | # coding: utf-8
"""
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
[email protected]
Summary
-------
Methods for reading/writing ASCII grid files.
Copyright (c) 2015-2020, PyKrige Developers
"""
import datetime
import io
import os
import warnings
import numpy as np
def write_asc_grid(x, y, z, filename="output.asc", no_data=-999.0, style=1):
r"""Writes gridded data to ASCII grid file (\*.asc).
This is useful for exporting data to a GIS program.
Parameters
----------
x : array_like, shape (N,) or (N, 1)
X-coordinates of grid points at center of cells.
y : array_like, shape (M,) or (M, 1)
Y-coordinates of grid points at center of cells.
z : array_like, shape (M, N)
Gridded data values. May be a masked array.
filename : string, optional
Name of output \*.asc file. Default name is 'output.asc'.
no_data : float, optional
no data value to be used
style : int, optional
Determines how to write the \*.asc file header.
Specifying 1 writes out DX, DY, XLLCENTER, YLLCENTER.
Specifying 2 writes out CELLSIZE (note DX must be the same as DY),
XLLCORNER, YLLCORNER. Default is 1.
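    Examples
    --------
    A minimal round-trip sketch; the grid values and file name are
    illustrative::
        import numpy as np
        x = np.arange(0.0, 3.0, 1.0)
        y = np.arange(0.0, 2.0, 1.0)
        z = np.zeros((2, 3))
        write_asc_grid(x, y, z, filename="sketch.asc", style=1)
        z2, x2, y2, cellsize, no_data = read_asc_grid("sketch.asc")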
"""
if np.ma.is_masked(z):
z = np.array(z.tolist(no_data))
x = np.squeeze(np.array(x))
y = np.squeeze(np.array(y))
z = np.squeeze(np.array(z))
nrows = z.shape[0]
ncols = z.shape[1]
if z.ndim != 2:
raise ValueError("Two-dimensional grid is required to write *.asc grid.")
if x.ndim > 1 or y.ndim > 1:
raise ValueError(
"Dimensions of X and/or Y coordinate arrays are not "
"as expected. Could not write *.asc grid."
)
if z.shape != (y.size, x.size):
warnings.warn(
"Grid dimensions are not as expected. "
"Incorrect *.asc file generation may result.",
RuntimeWarning,
)
if np.amin(x) != x[0] or np.amin(y) != y[0]:
warnings.warn(
"Order of X or Y coordinates is not as expected. "
"Incorrect *.asc file generation may result.",
RuntimeWarning,
)
dx = abs(x[1] - x[0])
dy = abs(y[1] - y[0])
if not np.isclose(abs((x[-1] - x[0]) / (x.shape[0] - 1)), dx) or not np.isclose(
abs((y[-1] - y[0]) / (y.shape[0] - 1)), dy
):
raise ValueError(
"X or Y spacing is not constant; *.asc grid cannot be written."
)
cellsize = -1
if style == 2:
if dx != dy:
raise ValueError(
"X and Y spacing is not the same. "
"Cannot write *.asc file in the specified format."
)
cellsize = dx
xllcenter = x[0]
yllcenter = y[0]
# Note that these values are flagged as -1. If there is a problem in trying
# to write out style 2, the -1 value will appear in the output file.
xllcorner = -1
yllcorner = -1
if style == 2:
xllcorner = xllcenter - dx / 2.0
yllcorner = yllcenter - dy / 2.0
with io.open(filename, "w") as f:
if style == 1:
f.write("NCOLS " + "{:<10n}".format(ncols) + "\n")
f.write("NROWS " + "{:<10n}".format(nrows) + "\n")
f.write("XLLCENTER " + "{:<10.2f}".format(xllcenter) + "\n")
f.write("YLLCENTER " + "{:<10.2f}".format(yllcenter) + "\n")
f.write("DX " + "{:<10.2f}".format(dx) + "\n")
f.write("DY " + "{:<10.2f}".format(dy) + "\n")
f.write("NODATA_VALUE " + "{:<10.2f}".format(no_data) + "\n")
elif style == 2:
f.write("NCOLS " + "{:<10n}".format(ncols) + "\n")
f.write("NROWS " + "{:<10n}".format(nrows) + "\n")
f.write("XLLCORNER " + "{:<10.2f}".format(xllcorner) + "\n")
f.write("YLLCORNER " + "{:<10.2f}".format(yllcorner) + "\n")
f.write("CELLSIZE " + "{:<10.2f}".format(cellsize) + "\n")
f.write("NODATA_VALUE " + "{:<10.2f}".format(no_data) + "\n")
else:
raise ValueError("style kwarg must be either 1 or 2.")
for m in range(z.shape[0] - 1, -1, -1):
for n in range(z.shape[1]):
f.write("{:<16.2f}".format(z[m, n]))
if m != 0:
f.write("\n")
def read_asc_grid(filename, footer=0):
r"""Reads ASCII grid file (\*.asc).
Parameters
----------
filename : str
Name of \*.asc file.
footer : int, optional
Number of lines at bottom of \*.asc file to skip.
Returns
-------
grid_array : numpy array, shape (M, N)
(M, N) array of grid values, where M is number of Y-coordinates and
N is number of X-coordinates. The array entry corresponding to
        the lower-left coordinates is at index [0, 0], so that
the array is oriented as it would be in X-Y space.
x : numpy array, shape (N,)
1D array of N X-coordinates.
y : numpy array, shape (M,)
1D array of M Y-coordinates.
CELLSIZE : tuple or float
Either a two-tuple of (x-cell size, y-cell size),
or a float that specifies the uniform cell size.
NODATA : float
Value that specifies which entries are not actual data.
"""
ncols = None
nrows = None
xllcorner = None
xllcenter = None
yllcorner = None
yllcenter = None
cellsize = None
dx = None
dy = None
no_data = None
header_lines = 0
with io.open(filename, "r") as f:
while True:
string, value = f.readline().split()
header_lines += 1
if string.lower() == "ncols":
ncols = int(value)
elif string.lower() == "nrows":
nrows = int(value)
elif string.lower() == "xllcorner":
xllcorner = float(value)
elif string.lower() == "xllcenter":
xllcenter = float(value)
elif string.lower() == "yllcorner":
yllcorner = float(value)
elif string.lower() == "yllcenter":
yllcenter = float(value)
elif string.lower() == "cellsize":
cellsize = float(value)
elif string.lower() == "cell_size":
cellsize = float(value)
elif string.lower() == "dx":
dx = float(value)
elif string.lower() == "dy":
dy = float(value)
elif string.lower() == "nodata_value":
no_data = float(value)
elif string.lower() == "nodatavalue":
no_data = float(value)
else:
raise IOError("could not read *.asc file. Error in header.")
if (
(ncols is not None)
and (nrows is not None)
and (
((xllcorner is not None) and (yllcorner is not None))
or ((xllcenter is not None) and (yllcenter is not None))
)
and ((cellsize is not None) or ((dx is not None) and (dy is not None)))
and (no_data is not None)
):
break
raw_grid_array = np.genfromtxt(
filename, skip_header=header_lines, skip_footer=footer
)
grid_array = np.flipud(raw_grid_array)
if nrows != grid_array.shape[0] or ncols != grid_array.shape[1]:
raise IOError(
"Error reading *.asc file. Encountered problem "
"with header: NCOLS and/or NROWS does not match "
"number of columns/rows in data file body."
)
if xllcorner is not None and yllcorner is not None:
if dx is not None and dy is not None:
xllcenter = xllcorner + dx / 2.0
yllcenter = yllcorner + dy / 2.0
else:
xllcenter = xllcorner + cellsize / 2.0
yllcenter = yllcorner + cellsize / 2.0
if dx is not None and dy is not None:
x = np.arange(xllcenter, xllcenter + ncols * dx, dx)
y = np.arange(yllcenter, yllcenter + nrows * dy, dy)
else:
x = np.arange(xllcenter, xllcenter + ncols * cellsize, cellsize)
y = np.arange(yllcenter, yllcenter + nrows * cellsize, cellsize)
# Sometimes x and y and can be an entry too long due to imprecision
# in calculating the upper cutoff for np.arange(); this bit takes care of
# that potential problem.
if x.size == ncols + 1:
x = x[:-1]
if y.size == nrows + 1:
y = y[:-1]
if cellsize is None:
cellsize = (dx, dy)
return grid_array, x, y, cellsize, no_data
def write_zmap_grid(
x, y, z, filename="output.zmap", no_data=-999.0, coord_sys="<null>"
):
r"""Writes gridded data to ASCII grid file in zmap format (\*.zmap).
This is useful for exporting data to a GIS program, or Petrel
https://gdal.org/drivers/raster/zmap.html
Parameters
----------
x : array_like, shape (N,) or (N, 1)
X-coordinates of grid points at center of cells.
y : array_like, shape (M,) or (M, 1)
Y-coordinates of grid points at center of cells.
z : array_like, shape (M, N)
Gridded data values. May be a masked array.
filename : string, optional
Name of output \*.zmap file. Default name is 'output.zmap'.
no_data : float, optional
no data value to be used
coord_sys : String, optional
        coordinate system description
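    Examples
    --------
    A minimal round-trip sketch mirroring the ASCII grid helpers above; the
    grid values and file name are illustrative::
        import numpy as np
        x = np.arange(0.0, 3.0, 1.0)
        y = np.arange(0.0, 2.0, 1.0)
        z = np.zeros((2, 3))
        write_zmap_grid(x, y, z, filename="sketch.zmap")
        z2, x2, y2, cellsize, no_data, crs = read_zmap_grid("sketch.zmap")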
"""
nodes_per_line = 5
field_width = 15
if np.ma.is_masked(z):
z = np.array(z.tolist(no_data))
x = np.squeeze(np.array(x))
y = np.squeeze(np.array(y))
z = np.squeeze(np.array(z))
nx = len(x)
ny = len(y)
dx = abs(x[1] - x[0])
dy = abs(y[1] - y[0])
if not np.isclose(abs((x[-1] - x[0]) / (x.shape[0] - 1)), dx) or not np.isclose(
abs((y[-1] - y[0]) / (y.shape[0] - 1)), dy
):
raise ValueError(
"X or Y spacing is not constant; *.asc grid cannot be written."
)
xllcenter = x[0]
yllcenter = y[0]
hix = xllcenter + (nx - 1) * dx
hiy = yllcenter + (ny - 1) * dy
now = datetime.datetime.now()
with io.open(filename, "w") as f:
f.write("!" + "\n")
f.write("! ZIMS FILE NAME : " + os.path.basename(filename) + "\n")
f.write(
"! FORMATTED FILE CREATION DATE: " + now.strftime("%d/%m/%Y") + "\n"
)
f.write(
"! FORMATTED FILE CREATION TIME: " + now.strftime("%H:%M:%S") + "\n"
)
f.write("! COORDINATE REFERENCE SYSTEM: " + coord_sys + "\n")
f.write("!" + "\n")
f.write("@Grid HEADER, GRID, " + str(nodes_per_line) + "\n")
f.write(" " + str(field_width) + ", " + str(no_data) + ", , 1 , 1" + "\n")
f.write(
" "
+ str(ny)
+ ", "
+ str(nx)
+ ", "
+ str(xllcenter)
+ ", "
+ str(hix)
+ ", "
+ str(yllcenter)
+ ", "
+ str(hiy)
+ "\n"
)
f.write(" " + str(dx) + ", 0.0, 0.0 " + "\n")
f.write("@" + "\n")
for n in range(z.shape[1]):
count = 0
for m in range(z.shape[0] - 1, -1, -1):
count += 1
if np.isnan(z[m, n]):
f.write(space_back_to_front(format(no_data, "13.7E") + " "))
else:
                    if abs(z[m, n]) >= 1e100:  # one trailing space less
f.write(space_back_to_front(format(z[m, n], "13.7E") + " "))
elif abs(z[m, n]) >= 1e6:
f.write(space_back_to_front(format(z[m, n], "13.7E") + " "))
else:
f.write(space_back_to_front("{:<13.4f}".format(z[m, n]) + " "))
if count % nodes_per_line == 0 or m == 0:
f.write("\n")
def read_zmap_grid(filename):
r"""Reads ASCII grid file in zmap format (\*.zmap).
https://gdal.org/drivers/raster/zmap.html
Parameters
----------
filename : str
Name of \*.zmap file.
Returns
-------
grid_array : numpy array, shape (M, N)
(M, N) array of grid values, where M is number of Y-coordinates and
N is number of X-coordinates. The array entry corresponding to
the lower-left coordinates is at index [M, 0], so that
the array is oriented as it would be in X-Y space.
x : numpy array, shape (N,)
1D array of N X-coordinates.
y : numpy array, shape (M,)
1D array of M Y-coordinates.
cellsize : tuple or float
Either a two-tuple of (x-cell size, y-cell size),
or a float that specifies the uniform cell size.
no_data_value : float
Value that specifies which entries are not actual data.
coord_sys : String
Coordinate system name
"""
no_data_value, nx, ny, originx, originy, maxx, maxy, dx, dy = (
0,
0,
0,
0,
0,
0,
0,
0,
0,
)
data_values = np.empty(1)
coord_sys = "<null>"
i_header_line, i_value = 0, 0
with io.open(filename, "r") as f:
while True:
line = f.readline()
if line.startswith("!"):
line_strings = line.split(":")
if line_strings[0].__contains__("COORDINATE REFERENCE SYSTEM"):
coord_sys = line_strings[1].replace("\n", "")
else:
line_strings = line.split()
line_strings = [string.replace(",", "") for string in line_strings]
if len(line_strings) == 0:
break
if i_header_line == -1 and not line_strings[0].startswith("!"):
for i_string in range(len(line_strings)):
data_values[i_value] = float(line_strings[i_string])
i_value += 1
if line_strings[0].startswith("@"):
if i_header_line == 0:
i_header_line += 1
else:
i_header_line = -1
if i_header_line > 0:
if i_header_line == 2:
no_data_value = float(line_strings[1])
elif i_header_line == 3:
ny = int(line_strings[0])
nx = int(line_strings[1])
originx = float(line_strings[2])
maxx = float(line_strings[3])
originy = float(line_strings[4])
maxy = float(line_strings[5])
data_values = np.empty(ny * nx)
i_header_line += 1
if nx * ny != len(data_values):
raise IOError(
"Error reading *.zmap file. Encountered problem "
"with header: (nx * ny) does not match with the "
"number items in data file body."
)
z = np.empty([ny, nx])
i_value = 0
for n in range(z.shape[1]):
for m in range(z.shape[0] - 1, -1, -1):
z[m, n] = data_values[i_value]
i_value += 1
dx = (maxx - originx) / (nx - 1)
dy = (maxy - originy) / (ny - 1)
gridx = np.arange(originx, originx + nx * dx, dx)
gridy = np.arange(originy, originy + ny * dy, dy)
cellsize = (dx, dy)
return z, gridx, gridy, cellsize, no_data_value, coord_sys
def space_back_to_front(string):
    """Move all spaces of a fixed-width field to the front (right-justify the value)."""
    net = string.replace(" ", "")
return "".join(string.rsplit(net)) + net
| 15,629 | 32.612903 | 88 | py |
PyKrige | PyKrige-main/src/pykrige/lib/__init__.py | __all__ = ["cok", "lapack", "variogram_models"]
| 48 | 23.5 | 47 | py |
PyKrige | PyKrige-main/tests/test_core.py | """
Testing code.
Updated BSM February 2017
"""
import os
import sys
import numpy as np
import pytest
from numpy.testing import assert_allclose
from pytest import approx
from scipy.spatial.distance import cdist
from pykrige import core
from pykrige import kriging_tools as kt
from pykrige import variogram_models
from pykrige.ok import OrdinaryKriging
from pykrige.ok3d import OrdinaryKriging3D
from pykrige.uk import UniversalKriging
from pykrige.uk3d import UniversalKriging3D
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
allclose_pars = {"rtol": 1e-05, "atol": 1e-08}
@pytest.fixture
def validation_ref():
data = np.genfromtxt(os.path.join(BASE_DIR, "test_data/test_data.txt"))
ok_test_answer, ok_test_gridx, ok_test_gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test1_answer.asc"), footer=2
)
uk_test_answer, uk_test_gridx, uk_test_gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test2_answer.asc"), footer=2
)
return (
data,
(ok_test_answer, ok_test_gridx, ok_test_gridy),
(uk_test_answer, uk_test_gridx, uk_test_gridy),
)
@pytest.fixture
def sample_data_2d():
data = np.array(
[
[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74],
]
)
gridx = np.arange(0.0, 6.0, 1.0)
gridx_2 = np.arange(0.0, 5.5, 0.5)
gridy = np.arange(0.0, 5.5, 0.5)
xi, yi = np.meshgrid(gridx, gridy)
mask = np.array(xi == yi)
return data, (gridx, gridy, gridx_2), mask
@pytest.fixture
def sample_data_3d():
data = np.array(
[
[0.1, 0.1, 0.3, 0.9],
[0.2, 0.1, 0.4, 0.8],
[0.1, 0.3, 0.1, 0.9],
[0.5, 0.4, 0.4, 0.5],
[0.3, 0.3, 0.2, 0.7],
]
)
gridx = np.arange(0.0, 0.6, 0.05)
gridy = np.arange(0.0, 0.6, 0.01)
gridz = np.arange(0.0, 0.6, 0.1)
zi, yi, xi = np.meshgrid(gridz, gridy, gridx, indexing="ij")
mask = np.array((xi == yi) & (yi == zi))
return data, (gridx, gridy, gridz), mask
def test_core_adjust_for_anisotropy():
X = np.array([[1.0, 0.0, -1.0, 0.0], [0.0, 1.0, 0.0, -1.0]]).T
X_adj = core._adjust_for_anisotropy(X, [0.0, 0.0], [2.0], [90.0])
assert_allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0, -1.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([-2.0, 0.0, 2.0, 0.0]), **allclose_pars)
def test_core_adjust_for_anisotropy_3d():
    # this is a bad example, as the X matrix is symmetric
# and insensitive to transpositions
X = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]).T
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [90.0, 0.0, 0.0]
)
assert_allclose(X_adj[:, 0], np.array([1.0, 0.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([0.0, 0.0, 2.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([0.0, -2.0, 0.0]), **allclose_pars)
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [0.0, 90.0, 0.0]
)
assert_allclose(X_adj[:, 0], np.array([0.0, 0.0, -1.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([0.0, 2.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([2.0, 0.0, 0.0]), **allclose_pars)
X_adj = core._adjust_for_anisotropy(
X, [0.0, 0.0, 0.0], [2.0, 2.0], [0.0, 0.0, 90.0]
)
assert_allclose(X_adj[:, 0], np.array([0.0, 1.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 1], np.array([-2.0, 0.0, 0.0]), **allclose_pars)
assert_allclose(X_adj[:, 2], np.array([0.0, 0.0, 2.0]), **allclose_pars)
def test_core_make_variogram_parameter_list():
# test of first case - variogram_model_parameters is None
# function should return None unaffected
result = core._make_variogram_parameter_list("linear", None)
assert result is None
# tests for second case - variogram_model_parameters is dict
with pytest.raises(KeyError):
core._make_variogram_parameter_list("linear", {"tacos": 1.0, "burritos": 2.0})
result = core._make_variogram_parameter_list(
"linear", {"slope": 1.0, "nugget": 0.0}
)
assert result == [1.0, 0.0]
with pytest.raises(KeyError):
core._make_variogram_parameter_list("power", {"frijoles": 1.0})
result = core._make_variogram_parameter_list(
"power", {"scale": 2.0, "exponent": 1.0, "nugget": 0.0}
)
assert result == [2.0, 1.0, 0.0]
with pytest.raises(KeyError):
core._make_variogram_parameter_list("exponential", {"tacos": 1.0})
with pytest.raises(KeyError):
core._make_variogram_parameter_list(
"exponential", {"range": 1.0, "nugget": 1.0}
)
result = core._make_variogram_parameter_list(
"exponential", {"sill": 5.0, "range": 2.0, "nugget": 1.0}
)
assert result == [4.0, 2.0, 1.0]
result = core._make_variogram_parameter_list(
"exponential", {"psill": 4.0, "range": 2.0, "nugget": 1.0}
)
assert result == [4.0, 2.0, 1.0]
with pytest.raises(TypeError):
core._make_variogram_parameter_list("custom", {"junk": 1.0})
with pytest.raises(ValueError):
core._make_variogram_parameter_list("blarg", {"junk": 1.0})
# tests for third case - variogram_model_parameters is list
with pytest.raises(ValueError):
core._make_variogram_parameter_list("linear", [1.0, 2.0, 3.0])
result = core._make_variogram_parameter_list("linear", [1.0, 2.0])
assert result == [1.0, 2.0]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("power", [1.0, 2.0])
result = core._make_variogram_parameter_list("power", [1.0, 2.0, 3.0])
assert result == [1.0, 2.0, 3.0]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("exponential", [1.0, 2.0, 3.0, 4.0])
result = core._make_variogram_parameter_list("exponential", [5.0, 2.0, 1.0])
assert result == [4.0, 2.0, 1.0]
result = core._make_variogram_parameter_list("custom", [1.0, 2.0, 3.0])
assert result == [1.0, 2.0, 3]
with pytest.raises(ValueError):
core._make_variogram_parameter_list("junk", [1.0, 1.0, 1.0])
# test for last case - make sure function handles incorrect
# variogram_model_parameters type appropriately
with pytest.raises(TypeError):
core._make_variogram_parameter_list("linear", "tacos")
def test_core_initialize_variogram_model(validation_ref):
data, _, _ = validation_ref
# Note the variogram_function argument is not a string in real life...
# core._initialize_variogram_model also checks the length of input
# lists, which is redundant now because the same tests are done in
# core._make_variogram_parameter_list
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"linear",
[0.0],
"linear",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"spherical",
[0.0],
"spherical",
6,
False,
"euclidean",
)
# core._initialize_variogram_model does also check coordinate type,
# this is NOT redundant
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
"spherical",
[0.0, 0.0, 0.0],
"spherical",
6,
False,
"tacos",
)
x = np.array([1.0 + n / np.sqrt(2) for n in range(4)])
y = np.array([1.0 + n / np.sqrt(2) for n in range(4)])
z = np.arange(1.0, 5.0, 1.0)
lags, semivariance, variogram_model_parameters = core._initialize_variogram_model(
np.vstack((x, y)).T, z, "linear", [0.0, 0.0], "linear", 6, False, "euclidean"
)
assert_allclose(lags, np.array([1.0, 2.0, 3.0]))
assert_allclose(semivariance, np.array([0.5, 2.0, 4.5]))
def test_core_initialize_variogram_model_3d(sample_data_3d):
data, _, _ = sample_data_3d
# Note the variogram_function argument is not a string in real life...
# again, these checks in core._initialize_variogram_model are redundant
# now because the same tests are done in
# core._make_variogram_parameter_list
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"linear",
[0.0],
"linear",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"spherical",
[0.0],
"spherical",
6,
False,
"euclidean",
)
with pytest.raises(ValueError):
core._initialize_variogram_model(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
"linear",
[0.0, 0.0],
"linear",
6,
False,
"geographic",
)
lags, semivariance, variogram_model_parameters = core._initialize_variogram_model(
np.vstack(
(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.0, 3.0, 4.0]),
)
).T,
np.array([1.0, 2.0, 3.0, 4.0]),
"linear",
[0.0, 0.0],
"linear",
3,
False,
"euclidean",
)
assert_allclose(
lags, np.array([np.sqrt(3.0), 2.0 * np.sqrt(3.0), 3.0 * np.sqrt(3.0)])
)
assert_allclose(semivariance, np.array([0.5, 2.0, 4.5]))
def test_core_calculate_variogram_model():
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.05, 2.95, 4.05, 4.95]),
"linear",
variogram_models.linear_variogram_model,
False,
)
assert_allclose(res, np.array([0.98, 1.05]), 0.01, 0.01)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.05, 2.95, 4.05, 4.95]),
"linear",
variogram_models.linear_variogram_model,
True,
)
assert_allclose(res, np.array([0.98, 1.05]), 0.01, 0.01)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 2.8284271, 5.1961524, 8.0]),
"power",
variogram_models.power_variogram_model,
False,
)
assert_allclose(res, np.array([1.0, 1.5, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.0, 1.4142, 1.7321, 2.0]),
"power",
variogram_models.power_variogram_model,
False,
)
assert_allclose(res, np.array([1.0, 0.5, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([1.2642, 1.7293, 1.9004, 1.9634]),
"exponential",
variogram_models.exponential_variogram_model,
False,
)
assert_allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([0.5769, 1.4872, 1.9065, 1.9914]),
"gaussian",
variogram_models.gaussian_variogram_model,
False,
)
assert_allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([3.33060952, 3.85063879, 3.96667301, 3.99256374]),
"exponential",
variogram_models.exponential_variogram_model,
False,
)
assert_allclose(res, np.array([3.0, 2.0, 1.0]), 0.001, 0.001)
res = core._calculate_variogram_model(
np.array([1.0, 2.0, 3.0, 4.0]),
np.array([2.60487044, 3.85968813, 3.99694817, 3.99998564]),
"gaussian",
variogram_models.gaussian_variogram_model,
False,
)
assert_allclose(res, np.array([3.0, 2.0, 1.0]), 0.001, 0.001)
def test_core_krige():
# Example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.22], [43.8, 24.6, 2.822]])
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([18.8, 67.9]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(1.6364, rel=1e-4)
assert ss == approx(0.4201, rel=1e-4)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([43.8, 24.6]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(2.822, rel=1e-3)
assert ss == approx(0.0, rel=1e-3)
def test_core_krige_3d():
# Adapted from example 3.2 from Kitanidis
data = np.array([[9.7, 47.6, 1.0, 1.22], [43.8, 24.6, 1.0, 2.822]])
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
np.array([18.8, 67.9, 1.0]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(1.6364, rel=1e-4)
assert ss == approx(0.4201, rel=1e-4)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1], data[:, 2])).T,
data[:, 3],
np.array([43.8, 24.6, 1.0]),
variogram_models.linear_variogram_model,
[0.006, 0.1],
"euclidean",
)
assert z == approx(2.822, rel=1e-3)
assert ss == approx(0.0, rel=1e-3)
def test_non_exact():
# custom data for this test
data = np.array(
[
[0.0, 0.0, 0.47],
[1.5, 1.5, 0.56],
[3, 3, 0.74],
[4.5, 4.5, 1.47],
]
)
# construct grid points so diagonal
# is identical to input points
gridx = np.arange(0.0, 4.51, 1.5)
gridy = np.arange(0.0, 4.51, 1.5)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 5.0],
)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
ok_non_exact = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 5.0],
exact_values=False,
)
z_non_exact, ss_non_exact = ok_non_exact.execute(
"grid", gridx, gridy, backend="vectorized"
)
in_values = np.diag(z)
    # test that the kriged field
    # at the input locations is identical
    # to the inputs themselves with
    # exact_values == True
    assert_allclose(in_values, data[:, 2])
    # test that the kriged field
    # at the input locations differs
    # from the inputs themselves
    # with exact_values == False
    in_values = np.diag(z_non_exact)
    assert ~np.allclose(in_values, data[:, 2])
# test that off diagonal values are the same
# by filling with dummy value and comparing
# each entry in array
np.fill_diagonal(z, 0.0)
np.fill_diagonal(z_non_exact, 0.0)
assert_allclose(z, z_non_exact)
def test_ok(validation_ref):
# Test to compare OK results to those obtained using KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
data, (ok_test_answer, gridx, gridy), _ = validation_ref
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, ok_test_answer)
z, ss = ok.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, ok_test_answer)
def test_ok_update_variogram_model(validation_ref):
data, (ok_test_answer, gridx, gridy), _ = validation_ref
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="blurg")
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
variogram_model = ok.variogram_model
variogram_parameters = ok.variogram_model_parameters
anisotropy_scaling = ok.anisotropy_scaling
anisotropy_angle = ok.anisotropy_angle
with pytest.raises(ValueError):
ok.update_variogram_model("blurg")
ok.update_variogram_model("power", anisotropy_scaling=3.0, anisotropy_angle=45.0)
    # TODO: check that the new parameters are equal to the set parameters
assert variogram_model != ok.variogram_model
assert not np.array_equal(variogram_parameters, ok.variogram_model_parameters)
assert anisotropy_scaling != ok.anisotropy_scaling
assert anisotropy_angle != ok.anisotropy_angle
def test_ok_get_variogram_points(validation_ref):
# Test to compare the variogram of OK results to those obtained using
# KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
# Variogram parameters
_variogram_parameters = [500.0, 3000.0, 0.0]
data, _, (ok_test_answer, gridx, gridy) = validation_ref
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=_variogram_parameters,
)
    # Get the variogram points from the OrdinaryKriging instance
lags, calculated_variogram = ok.get_variogram_points()
# Generate the expected variogram points according to the
# exponential variogram model
expected_variogram = variogram_models.exponential_variogram_model(
_variogram_parameters, lags
)
assert_allclose(calculated_variogram, expected_variogram)
def test_ok_execute(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], exact_values="blurg")
ok_non_exact = OrdinaryKriging(
data[:, 0], data[:, 1], data[:, 2], exact_values=False
)
with pytest.raises(ValueError):
ok.execute("blurg", gridx, gridy)
z, ss = ok.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z, ss = ok.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z1, ss1 = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z1, z)
assert_allclose(ss1, ss)
z, ss = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
with pytest.raises(IOError):
ok.execute("masked", gridx, gridy, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
ok.execute("masked", gridx, gridy, mask=mask, backend="vectorized")
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref.T, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(IOError):
ok.execute("masked", gridx, gridy, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
ok.execute("masked", gridx, gridy, mask=mask, backend="loop")
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok.execute("masked", gridx, gridy, mask=mask_ref.T, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = ok_non_exact.execute(
"masked", gridx, gridy, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(ValueError):
ok.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
backend="vectorized",
)
z, ss = ok.execute("points", gridx[0], gridy[0], backend="vectorized")
assert z.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
ok.execute(
"points", np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]), backend="loop"
)
z, ss = ok.execute("points", gridx[0], gridy[0], backend="loop")
assert z.shape == (1,)
assert ss.shape == (1,)
def test_cython_ok(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
ok_non_exact = OrdinaryKriging(
data[:, 0], data[:, 1], data[:, 2], exact_values=False
)
z1, ss1 = ok.execute("grid", gridx, gridy, backend="loop")
z2, ss2 = ok.execute("grid", gridx, gridy, backend="C")
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
z1, ss1 = ok_non_exact.execute("grid", gridx, gridy, backend="loop")
z2, ss2 = ok_non_exact.execute("grid", gridx, gridy, backend="C")
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
closest_points = 4
z1, ss1 = ok.execute(
"grid", gridx, gridy, backend="loop", n_closest_points=closest_points
)
z2, ss2 = ok.execute(
"grid", gridx, gridy, backend="C", n_closest_points=closest_points
)
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
z1, ss1 = ok_non_exact.execute(
"grid", gridx, gridy, backend="loop", n_closest_points=closest_points
)
z2, ss2 = ok_non_exact.execute(
"grid", gridx, gridy, backend="C", n_closest_points=closest_points
)
assert_allclose(z1, z2)
assert_allclose(ss1, ss2)
def test_uk(validation_ref):
# Test to compare UK with linear drift to results from KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
data, _, (uk_test_answer, gridx, gridy) = validation_ref
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
drift_terms=["regional_linear"],
)
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, uk_test_answer)
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, uk_test_answer)
def test_uk_update_variogram_model(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="blurg")
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], drift_terms=["external_Z"])
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
drift_terms=["external_Z"],
external_drift=np.array([0]),
)
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], drift_terms=["point_log"])
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2])
variogram_model = uk.variogram_model
variogram_parameters = uk.variogram_model_parameters
anisotropy_scaling = uk.anisotropy_scaling
anisotropy_angle = uk.anisotropy_angle
with pytest.raises(ValueError):
uk.update_variogram_model("blurg")
uk.update_variogram_model("power", anisotropy_scaling=3.0, anisotropy_angle=45.0)
# TODO: check that the new parameters are equal to the expected ones
assert variogram_model != uk.variogram_model
assert not np.array_equal(variogram_parameters, uk.variogram_model_parameters)
assert anisotropy_scaling != uk.anisotropy_scaling
assert anisotropy_angle != uk.anisotropy_angle
def test_uk_get_variogram_points(validation_ref):
# Test to compare the variogram of UK with linear drift to results from
# KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater,
# vol. 47, no. 4, 580-586.)
# Variogram parameters
_variogram_parameters = [500.0, 3000.0, 0.0]
data, _, (uk_test_answer, gridx, gridy) = validation_ref
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=_variogram_parameters,
drift_terms=["regional_linear"],
)
# Get the variogram points from the UniversalKriging instance
lags, calculated_variogram = uk.get_variogram_points()
# Generate the expected variogram points according to the
# exponential variogram model
expected_variogram = variogram_models.exponential_variogram_model(
_variogram_parameters, lags
)
assert_allclose(calculated_variogram, expected_variogram)
def test_uk_calculate_data_point_zscalars(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
external_drift=dem,
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
external_drift=dem,
external_drift_x=dem_x,
external_drift_y=np.arange(0.0, 5.0, 1.0),
)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 0.0],
drift_terms=["external_Z"],
external_drift=dem,
external_drift_x=dem_x,
external_drift_y=dem_y,
)
assert_allclose(uk.z_scalars, data[:, 0])
xi, yi = np.meshgrid(np.arange(0.0, 5.3, 0.1), gridy)
with pytest.raises(ValueError):
uk._calculate_data_point_zscalars(xi, yi)
xi, yi = np.meshgrid(np.arange(0.0, 5.0, 0.1), gridy)
z_scalars = uk._calculate_data_point_zscalars(xi, yi)
assert_allclose(z_scalars[0, :], np.arange(0.0, 5.0, 0.1))
def test_uk_execute_single_point():
# Test data and answer from lecture notes by Nicolas Christou, UCLA Stats
data = np.array(
[
[61.0, 139.0, 477.0],
[63.0, 140.0, 696.0],
[64.0, 129.0, 227.0],
[68.0, 128.0, 646.0],
[71.0, 140.0, 606.0],
[73.0, 141.0, 791.0],
[75.0, 128.0, 783.0],
]
)
point = (65.0, 137.0)
z_answer = 567.54
ss_answer = 9.044
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="exponential",
variogram_parameters=[10.0, 9.99, 0.0],
drift_terms=["regional_linear"],
)
z, ss = uk.execute(
"points", np.array([point[0]]), np.array([point[1]]), backend="vectorized"
)
assert z_answer == approx(z[0], rel=0.1)
assert ss_answer == approx(ss[0], rel=0.1)
z, ss = uk.execute(
"points", np.array([61.0]), np.array([139.0]), backend="vectorized"
)
assert z[0] == approx(477.0, rel=1e-3)
assert ss[0] == approx(0.0, rel=1e-3)
z, ss = uk.execute("points", np.array([61.0]), np.array([139.0]), backend="loop")
assert z[0] == approx(477.0, rel=1e-3)
assert ss[0] == approx(0.0, rel=1e-3)
def test_uk_execute(sample_data_2d):
data, (gridx, gridy, _), mask_ref = sample_data_2d
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
exact_values="blurg",
)
uk_non_exact = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
        exact_values=False,
    )
with pytest.raises(ValueError):
uk.execute("blurg", gridx, gridy)
with pytest.raises(ValueError):
uk.execute("grid", gridx, gridy, backend="mrow")
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z1, ss1 = uk_non_exact.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z1, z)
assert_allclose(ss1, ss)
z, ss = uk_non_exact.execute("grid", gridx, gridy, backend="vectorized")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
shape = (gridy.size, gridx.size)
assert z.shape == shape
assert ss.shape == shape
assert np.amax(z) != np.amin(z)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(z)
with pytest.raises(IOError):
uk.execute("masked", gridx, gridy, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk.execute("masked", gridx, gridy, mask=mask, backend="vectorized")
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref.T, backend="vectorized")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(IOError):
uk.execute("masked", gridx, gridy, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk.execute("masked", gridx, gridy, mask=mask, backend="loop")
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = uk.execute("masked", gridx, gridy, mask=mask_ref.T, backend="loop")
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
z, ss = uk_non_exact.execute(
"masked", gridx, gridy, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0] is np.ma.masked
assert ss[0, 0] is np.ma.masked
with pytest.raises(ValueError):
uk.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
backend="vectorized",
)
z, ss = uk.execute("points", gridx[0], gridy[0], backend="vectorized")
assert z.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
uk.execute(
"points", np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]), backend="loop"
)
z, ss = uk.execute("points", gridx[0], gridy[0], backend="loop")
assert z.shape == (1,)
assert ss.shape == (1,)
def test_ok_uk_produce_same_result(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
z_ok, ss_ok = ok.execute("grid", gridx, gridy, backend="vectorized")
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
uk_non_exact = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
exact_values=False,
)
z_uk, ss_uk = uk.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
z_uk, ss_uk = uk_non_exact.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
z_ok, ss_ok = ok.execute("grid", gridx, gridy, backend="loop")
z_uk, ss_uk = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
z_uk, ss_uk = uk_non_exact.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_ok, z_uk)
assert_allclose(ss_ok, ss_uk)
def test_ok_backends_produce_same_result(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
z_ok_v, ss_ok_v = ok.execute("grid", gridx, gridy, backend="vectorized")
z_ok_l, ss_ok_l = ok.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_ok_v, z_ok_l)
assert_allclose(ss_ok_v, ss_ok_l)
def test_uk_backends_produce_same_result(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
verbose=False,
enable_plotting=False,
)
z_uk_v, ss_uk_v = uk.execute("grid", gridx, gridy, backend="vectorized")
z_uk_l, ss_uk_l = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z_uk_v, z_uk_l)
assert_allclose(ss_uk_v, ss_uk_l)
def test_kriging_tools(sample_data_2d):
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
z_write, ss_write = ok.execute("grid", gridx, gridy)
kt.write_asc_grid(
gridx,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.asc"),
style=1,
)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/temp.asc")
)
assert_allclose(z_write, z_read, 0.01, 0.01)
assert_allclose(gridx, x_read)
assert_allclose(gridy, y_read)
kt.write_zmap_grid(
gridx,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.zmap"),
no_data=1e30,
)
z_read, x_read, y_read, cellsize, no_data, _ = kt.read_zmap_grid(
os.path.join(BASE_DIR, "test_data/temp.zmap")
)
assert_allclose(z_write, z_read, 0.01, 0.01)
assert_allclose(gridx, x_read)
assert_allclose(gridy, y_read)
z_write, ss_write = ok.execute("masked", gridx, gridy, mask=mask_ref)
kt.write_asc_grid(
gridx,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.asc"),
style=1,
)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/temp.asc")
)
assert np.ma.allclose(
z_write,
np.ma.masked_where(z_read == no_data, z_read),
masked_equal=True,
rtol=0.01,
atol=0.01,
)
assert_allclose(gridx, x_read)
assert_allclose(gridy, y_read)
kt.write_zmap_grid(
gridx,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.zmap"),
no_data=1e30,
)
z_read, x_read, y_read, cellsize, no_data, _ = kt.read_zmap_grid(
os.path.join(BASE_DIR, "test_data/temp.zmap")
)
assert np.ma.allclose(
z_write,
np.ma.masked_where(z_read == no_data, z_read),
masked_equal=True,
rtol=0.01,
atol=0.01,
)
assert_allclose(gridx, x_read)
assert_allclose(gridy, y_read)
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2])
z_write, ss_write = ok.execute("grid", gridx_2, gridy)
kt.write_asc_grid(
gridx_2,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.asc"),
style=2,
)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/temp.asc")
)
assert_allclose(z_write, z_read, 0.01, 0.01)
assert_allclose(gridx_2, x_read)
assert_allclose(gridy, y_read)
kt.write_zmap_grid(
gridx_2,
gridy,
z_write,
filename=os.path.join(BASE_DIR, "test_data/temp.zmap"),
)
z_read, x_read, y_read, cellsize, no_data, _ = kt.read_zmap_grid(
os.path.join(BASE_DIR, "test_data/temp.zmap")
)
assert_allclose(z_write, z_read, 0.01, 0.01)
assert_allclose(gridx_2, x_read)
assert_allclose(gridy, y_read)
os.remove(os.path.join(BASE_DIR, "test_data/temp.asc"))
os.remove(os.path.join(BASE_DIR, "test_data/temp.zmap"))
# http://doc.pytest.org/en/latest/skipping.html#id1
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
def test_uk_three_primary_drifts(sample_data_2d):
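    # Combine all three built-in drift terms (regional_linear, external_Z,
    # point_log) in a single universal kriging run and check that both
    # backends return finite grids of the expected shape.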
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
well = np.array([[1.1, 1.1, -1.0]])
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear", "external_Z", "point_log"],
point_drift=well,
external_drift=dem,
external_drift_x=dem_x,
external_drift_y=dem_y,
)
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
assert z.shape == (gridy.shape[0], gridx.shape[0])
assert ss.shape == (gridy.shape[0], gridx.shape[0])
assert np.all(np.isfinite(z))
assert not np.all(np.isnan(z))
assert np.all(np.isfinite(ss))
assert not np.all(np.isnan(ss))
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
assert z.shape == (gridy.shape[0], gridx.shape[0])
assert ss.shape == (gridy.shape[0], gridx.shape[0])
assert np.all(np.isfinite(z))
assert not np.all(np.isnan(z))
assert np.all(np.isfinite(ss))
assert not np.all(np.isnan(ss))
def test_uk_specified_drift(sample_data_2d):
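    # User-supplied ("specified") drift arrays built from the x/y coordinates
    # or the point-log term must reproduce the corresponding built-in drifts,
    # and malformed specifications must raise.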
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
xg, yg = np.meshgrid(gridx, gridy)
well = np.array([[1.1, 1.1, -1.0]])
point_log = (
well[0, 2]
* np.log(np.sqrt((xg - well[0, 0]) ** 2.0 + (yg - well[0, 1]) ** 2.0))
* -1.0
)
if np.any(np.isinf(point_log)):
point_log[np.isinf(point_log)] = -100.0 * well[0, 2] * -1.0
point_log_data = (
well[0, 2]
* np.log(
np.sqrt((data[:, 0] - well[0, 0]) ** 2.0 + (data[:, 1] - well[0, 1]) ** 2.0)
)
* -1.0
)
if np.any(np.isinf(point_log_data)):
point_log_data[np.isinf(point_log_data)] = -100.0 * well[0, 2] * -1.0
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
)
with pytest.raises(TypeError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=data[:, 0],
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:2, 0]],
)
uk_spec = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:, 0], data[:, 1]],
)
with pytest.raises(ValueError):
uk_spec.execute("grid", gridx, gridy, specified_drift_arrays=[gridx, gridy])
with pytest.raises(TypeError):
uk_spec.execute("grid", gridx, gridy, specified_drift_arrays=gridx)
with pytest.raises(ValueError):
uk_spec.execute("grid", gridx, gridy, specified_drift_arrays=[xg])
z_spec, ss_spec = uk_spec.execute(
"grid", gridx, gridy, specified_drift_arrays=[xg, yg]
)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
uk_spec = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[point_log_data],
)
z_spec, ss_spec = uk_spec.execute(
"grid", gridx, gridy, specified_drift_arrays=[point_log]
)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
uk_spec = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:, 0], data[:, 1], point_log_data],
)
z_spec, ss_spec = uk_spec.execute(
"grid", gridx, gridy, specified_drift_arrays=[xg, yg, point_log]
)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear", "point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
def test_uk_functional_drift(sample_data_2d):
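    # Functional drift terms given as callables of (x, y) should match the
    # equivalent built-in regional_linear and point_log drifts.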
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
well = np.array([[1.1, 1.1, -1.0]])
func_x = lambda x, y: x # noqa
func_y = lambda x, y: y # noqa
def func_well(x, y):
return -well[0, 2] * np.log(
np.sqrt((x - well[0, 0]) ** 2.0 + (y - well[0, 1]) ** 2.0)
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
)
with pytest.raises(TypeError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=func_x,
)
uk_func = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_x, func_y],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
uk_func = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_well],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
uk_func = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_x, func_y, func_well],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy)
uk_lin = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
drift_terms=["regional_linear", "point_log"],
point_drift=well,
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
def test_uk_with_external_drift(validation_ref):
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
dem, demx, demy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test3_dem.asc")
)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="spherical",
variogram_parameters=[500.0, 3000.0, 0.0],
anisotropy_scaling=1.0,
anisotropy_angle=0.0,
drift_terms=["external_Z"],
external_drift=dem,
external_drift_x=demx,
external_drift_y=demy,
verbose=False,
)
answer, gridx, gridy, cellsize, no_data = kt.read_asc_grid(
os.path.join(BASE_DIR, "test_data/test3_answer.asc")
)
z, ss = uk.execute("grid", gridx, gridy, backend="vectorized")
assert_allclose(z, answer, **allclose_pars)
z, ss = uk.execute("grid", gridx, gridy, backend="loop")
assert_allclose(z, answer, **allclose_pars)
def test_force_exact():
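    # Kriging is an exact interpolator: at data locations the estimate must
    # equal the observed value with zero kriging variance, while the variance
    # stays nonzero away from the data (checked for OK, UK and core._krige).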
data = np.array([[1.0, 1.0, 2.0], [2.0, 2.0, 1.5], [3.0, 3.0, 1.0]])
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[1.0, 1.0],
)
z, ss = ok.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="vectorized")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = ok.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="vectorized"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="vectorized"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert not np.any(np.isclose(ss, 0))
z, ss = ok.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="vectorized",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
z, ss = ok.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="loop")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = ok.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="loop"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="loop"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert not np.any(np.isclose(ss, 0))
z, ss = ok.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="loop",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2])
z, ss = uk.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="vectorized")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = uk.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="vectorized"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="vectorized"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="vectorized"
)
assert not (np.any(np.isclose(ss, 0)))
z, ss = uk.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="vectorized",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
z, ss = uk.execute("grid", [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], backend="loop")
assert z[0, 0] == approx(2.0)
assert ss[0, 0] == approx(0.0)
assert z[1, 1] == approx(1.5)
assert ss[1, 1] == approx(0.0)
assert z[2, 2] == approx(1.0)
assert ss[2, 2] == approx(0.0)
assert ss[0, 2] != approx(0.0)
assert ss[2, 0] != approx(0.0)
z, ss = uk.execute(
"points", [1.0, 2.0, 3.0, 3.0], [2.0, 1.0, 1.0, 3.0], backend="loop"
)
assert ss[0] != approx(0.0)
assert ss[1] != approx(0.0)
assert ss[2] != approx(0.0)
assert z[3] == approx(1.0)
assert ss[3] == approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 4.0, 0.1), np.arange(0.0, 4.0, 0.1), backend="loop"
)
assert z[10, 10] == approx(2.0)
assert ss[10, 10] == approx(0.0)
assert z[20, 20] == approx(1.5)
assert ss[20, 20] == approx(0.0)
assert z[30, 30] == approx(1.0)
assert ss[30, 30] == approx(0.0)
assert ss[0, 0] != approx(0.0)
assert ss[15, 15] != approx(0.0)
assert ss[10, 0] != approx(0.0)
assert ss[0, 10] != approx(0.0)
assert ss[20, 10] != approx(0.0)
assert ss[10, 20] != approx(0.0)
assert ss[30, 20] != approx(0.0)
assert ss[20, 30] != approx(0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert np.any(np.isclose(ss, 0))
assert not np.any(np.isclose(ss[:9, :30], 0))
assert not np.allclose(z[:9, :30], 0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend="loop"
)
assert not np.any(np.isclose(ss, 0))
z, ss = uk.execute(
"masked",
np.arange(2.5, 3.5, 0.1),
np.arange(2.5, 3.5, 0.25),
backend="loop",
mask=np.asarray(
np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.0
),
)
assert np.isclose(ss[2, 5], 0)
assert not np.allclose(ss, 0.0)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([1.0, 1.0]),
variogram_models.linear_variogram_model,
[1.0, 1.0],
"euclidean",
)
assert z == approx(2.0)
assert ss == approx(0.0)
z, ss = core._krige(
np.vstack((data[:, 0], data[:, 1])).T,
data[:, 2],
np.array([1.0, 2.0]),
variogram_models.linear_variogram_model,
[1.0, 1.0],
"euclidean",
)
assert ss != approx(0.0)
data = np.zeros((50, 3))
x, y = np.meshgrid(np.arange(0.0, 10.0, 1.0), np.arange(0.0, 10.0, 2.0))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(x) * np.ravel(y)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[100.0, 1.0],
)
z, ss = ok.execute(
"grid",
np.arange(0.0, 10.0, 1.0),
np.arange(0.0, 10.0, 2.0),
backend="vectorized",
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = ok.execute(
"grid",
np.arange(0.5, 10.0, 1.0),
np.arange(0.5, 10.0, 2.0),
backend="vectorized",
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
z, ss = ok.execute(
"grid", np.arange(0.0, 10.0, 1.0), np.arange(0.0, 10.0, 2.0), backend="loop"
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = ok.execute(
"grid", np.arange(0.5, 10.0, 1.0), np.arange(0.5, 10.0, 2.0), backend="loop"
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="linear",
variogram_parameters=[100.0, 1.0],
)
z, ss = uk.execute(
"grid",
np.arange(0.0, 10.0, 1.0),
np.arange(0.0, 10.0, 2.0),
backend="vectorized",
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = uk.execute(
"grid",
np.arange(0.5, 10.0, 1.0),
np.arange(0.5, 10.0, 2.0),
backend="vectorized",
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
z, ss = uk.execute(
"grid", np.arange(0.0, 10.0, 1.0), np.arange(0.0, 10.0, 2.0), backend="loop"
)
assert_allclose(np.ravel(z), data[:, 2], **allclose_pars)
assert_allclose(ss, 0.0, **allclose_pars)
z, ss = uk.execute(
"grid", np.arange(0.5, 10.0, 1.0), np.arange(0.5, 10.0, 2.0), backend="loop"
)
assert not np.allclose(np.ravel(z), data[:, 2])
assert not np.allclose(ss, 0.0)
def test_custom_variogram(sample_data_2d):
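    # Custom variogram models need both a parameter list and a callable;
    # unknown model names or incomplete custom specifications must raise
    # ValueError.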
data, (gridx, gridy, gridx_2), mask_ref = sample_data_2d
def func(params, dist):
return params[0] * np.log10(dist + params[1]) + params[2]
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="mrow")
with pytest.raises(ValueError):
UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="custom")
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_function=0,
)
with pytest.raises(ValueError):
UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_function=func,
)
uk = UniversalKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_parameters=[1.0, 1.0, 1.0],
variogram_function=func,
)
assert uk.variogram_function([1.0, 1.0, 1.0], 1.0) == approx(1.3010, rel=1e-4)
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="linear")
uk.update_variogram_model(
"custom", variogram_parameters=[1.0, 1.0, 1.0], variogram_function=func
)
assert uk.variogram_function([1.0, 1.0, 1.0], 1.0) == approx(1.3010, rel=1e-4)
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="mrow")
with pytest.raises(ValueError):
OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="custom")
with pytest.raises(ValueError):
OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_function=0,
)
with pytest.raises(ValueError):
OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_function=func,
)
ok = OrdinaryKriging(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_model="custom",
variogram_parameters=[1.0, 1.0, 1.0],
variogram_function=func,
)
assert ok.variogram_function([1.0, 1.0, 1.0], 1.0) == approx(1.3010, rel=1e-4)
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model="linear")
ok.update_variogram_model(
"custom", variogram_parameters=[1.0, 1.0, 1.0], variogram_function=func
)
assert ok.variogram_function([1.0, 1.0, 1.0], 1.0) == approx(1.3010, rel=1e-4)
def test_ok3d(validation_ref):
data, (ok_test_answer, gridx_ref, gridy_ref), _ = validation_ref
# Test to compare K3D results to those obtained using KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, vol. 47,
# no. 4, 580-586.)
k3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
with pytest.raises(ValueError):
OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
exact_values="blurg",
)
ok3d_non_exact = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
exact_values=False,
)
k, ss = k3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="vectorized"
)
assert_allclose(np.squeeze(k), ok_test_answer)
k, ss = k3d.execute("grid", gridx_ref, gridy_ref, np.array([0.0]), backend="loop")
assert_allclose(np.squeeze(k), ok_test_answer)
# Test to compare K3D results to those obtained using KT3D.
data = np.genfromtxt(
os.path.join(BASE_DIR, "test_data", "test3d_data.txt"), skip_header=1
)
ans = np.genfromtxt(os.path.join(BASE_DIR, "test_data", "test3d_answer.txt"))
ans_z = ans[:, 0].reshape((10, 10, 10))
ans_ss = ans[:, 1].reshape((10, 10, 10))
k3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
k, ss = k3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="vectorized"
)
assert_allclose(k, ans_z, rtol=1e-3, atol=1e-8)
assert_allclose(ss, ans_ss, rtol=1e-3, atol=1e-8)
k3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
k, ss = k3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="loop"
)
assert_allclose(k, ans_z, rtol=1e-3, atol=1e-8)
assert_allclose(ss, ans_ss, rtol=1e-3, atol=1e-8)
def test_ok3d_moving_window():
# Test to compare K3D results to those obtained using KT3D.
data = np.genfromtxt(
os.path.join(BASE_DIR, "test_data", "test3d_data.txt"), skip_header=1
)
    ans = np.genfromtxt(os.path.join(BASE_DIR, "test_data", "test3d_answer.txt"))
ans_z = ans[:, 0].reshape((10, 10, 10))
ans_ss = ans[:, 1].reshape((10, 10, 10))
k3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
k, ss = k3d.execute(
"grid",
np.arange(10.0),
np.arange(10.0),
np.arange(10.0),
backend="loop",
n_closest_points=10,
)
assert_allclose(k, ans_z, rtol=1e-3)
assert_allclose(ss, ans_ss, rtol=1e-3)
def test_ok3d_uk3d_and_backends_produce_same_results(validation_ref):
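    # With no drift terms, UniversalKriging3D should reduce to
    # OrdinaryKriging3D, and the vectorized and loop backends should agree.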
data, _, (uk_test_answer, gridx_ref, gridy_ref) = validation_ref
ok3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
ok_v, oss_v = ok3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="vectorized"
)
ok_l, oss_l = ok3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="loop"
)
uk3d = UniversalKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
)
uk_v, uss_v = uk3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="vectorized"
)
assert_allclose(uk_v, ok_v)
uk_l, uss_l = uk3d.execute(
"grid", gridx_ref, gridy_ref, np.array([0.0]), backend="loop"
)
assert_allclose(uk_l, ok_l)
assert_allclose(uk_l, uk_v)
assert_allclose(uss_l, uss_v)
data = np.genfromtxt(
os.path.join(BASE_DIR, "test_data", "test3d_data.txt"), skip_header=1
)
ok3d = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
ok_v, oss_v = ok3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="vectorized"
)
ok_l, oss_l = ok3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="loop"
)
uk3d = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
variogram_parameters=[1.0, 0.1],
)
uk_v, uss_v = uk3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="vectorized"
)
assert_allclose(uk_v, ok_v)
assert_allclose(uss_v, oss_v)
uk_l, uss_l = uk3d.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="loop"
)
assert_allclose(uk_l, ok_l)
assert_allclose(uss_l, oss_l)
assert_allclose(uk_l, uk_v)
assert_allclose(uss_l, uss_v)
def test_ok3d_update_variogram_model(sample_data_3d):
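    # update_variogram_model should replace the model, its parameters and all
    # 3D anisotropy settings in place.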
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
with pytest.raises(ValueError):
OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="blurg"
)
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3])
variogram_model = k3d.variogram_model
variogram_parameters = k3d.variogram_model_parameters
anisotropy_scaling_y = k3d.anisotropy_scaling_y
anisotropy_scaling_z = k3d.anisotropy_scaling_z
anisotropy_angle_x = k3d.anisotropy_angle_x
anisotropy_angle_y = k3d.anisotropy_angle_y
anisotropy_angle_z = k3d.anisotropy_angle_z
with pytest.raises(ValueError):
k3d.update_variogram_model("blurg")
k3d.update_variogram_model(
"power",
anisotropy_scaling_y=3.0,
anisotropy_scaling_z=3.0,
anisotropy_angle_x=45.0,
anisotropy_angle_y=45.0,
anisotropy_angle_z=45.0,
)
assert variogram_model != k3d.variogram_model
assert not np.array_equal(variogram_parameters, k3d.variogram_model_parameters)
assert anisotropy_scaling_y != k3d.anisotropy_scaling_y
assert anisotropy_scaling_z != k3d.anisotropy_scaling_z
assert anisotropy_angle_x != k3d.anisotropy_angle_x
assert anisotropy_angle_y != k3d.anisotropy_angle_y
assert anisotropy_angle_z != k3d.anisotropy_angle_z
def test_uk3d_update_variogram_model(sample_data_3d):
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
with pytest.raises(ValueError):
UniversalKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="blurg"
)
uk3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3])
variogram_model = uk3d.variogram_model
variogram_parameters = uk3d.variogram_model_parameters
anisotropy_scaling_y = uk3d.anisotropy_scaling_y
anisotropy_scaling_z = uk3d.anisotropy_scaling_z
anisotropy_angle_x = uk3d.anisotropy_angle_x
anisotropy_angle_y = uk3d.anisotropy_angle_y
anisotropy_angle_z = uk3d.anisotropy_angle_z
with pytest.raises(ValueError):
uk3d.update_variogram_model("blurg")
uk3d.update_variogram_model(
"power",
anisotropy_scaling_y=3.0,
anisotropy_scaling_z=3.0,
anisotropy_angle_x=45.0,
anisotropy_angle_y=45.0,
anisotropy_angle_z=45.0,
)
assert not variogram_model == uk3d.variogram_model
assert not np.array_equal(variogram_parameters, uk3d.variogram_model_parameters)
assert not anisotropy_scaling_y == uk3d.anisotropy_scaling_y
assert not anisotropy_scaling_z == uk3d.anisotropy_scaling_z
assert not anisotropy_angle_x == uk3d.anisotropy_angle_x
assert not anisotropy_angle_y == uk3d.anisotropy_angle_y
assert not anisotropy_angle_z == uk3d.anisotropy_angle_z
def test_ok3d_backends_produce_same_result(sample_data_3d):
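    # Vectorized and loop backends must agree in 3D, for both exact and
    # non-exact (exact_values=False) kriging.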
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
k3d = OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
ok3d_non_exact = OrdinaryKriging3D(
data[:, 0],
data[:, 1],
np.zeros(data[:, 1].shape),
data[:, 2],
variogram_model="exponential",
variogram_parameters=[500.0, 3000.0, 0.0],
exact_values=False,
)
k_k3d_v, ss_k3d_v = k3d.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, backend="vectorized"
)
k_k3d_l, ss_k3d_l = k3d.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, backend="loop"
)
assert_allclose(k_k3d_v, k_k3d_l, rtol=1e-05, atol=1e-8)
assert_allclose(ss_k3d_v, ss_k3d_l, rtol=1e-05, atol=1e-8)
k, ss = ok3d_non_exact.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="loop"
)
k1, ss1 = ok3d_non_exact.execute(
"grid", np.arange(10.0), np.arange(10.0), np.arange(10.0), backend="vectorized"
)
assert_allclose(k1, k)
assert_allclose(ss1, ss)
def test_ok3d_execute(sample_data_3d):
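    # Exercise OrdinaryKriging3D.execute() for the grid, masked and points
    # styles, including the error paths (bad style string, missing or
    # mis-shaped mask, mismatched point arrays).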
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3])
with pytest.raises(ValueError):
k3d.execute("blurg", gridx_ref, gridy_ref, gridz_ref)
k, ss = k3d.execute("grid", gridx_ref, gridy_ref, gridz_ref, backend="vectorized")
shape = (gridz_ref.size, gridy_ref.size, gridx_ref.size)
assert k.shape == shape
assert ss.shape == shape
assert np.amax(k) != np.amin(k)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(k)
k, ss = k3d.execute("grid", gridx_ref, gridy_ref, gridz_ref, backend="loop")
shape = (gridz_ref.size, gridy_ref.size, gridx_ref.size)
assert k.shape == shape
assert ss.shape == shape
assert np.amax(k) != np.amin(k)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(k)
with pytest.raises(IOError):
k3d.execute("masked", gridx_ref, gridy_ref, gridz_ref, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask, backend="vectorized"
)
k, ss = k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref, backend="vectorized"
)
assert np.ma.is_masked(k)
assert np.ma.is_masked(ss)
assert k[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
z, ss = k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref.T, backend="vectorized"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
with pytest.raises(IOError):
k3d.execute("masked", gridx_ref, gridy_ref, gridz_ref, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask, backend="loop"
)
k, ss = k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref, backend="loop"
)
assert np.ma.is_masked(k)
assert np.ma.is_masked(ss)
assert k[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
z, ss = k3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
with pytest.raises(ValueError):
k3d.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
np.array([1.0]),
backend="vectorized",
)
k, ss = k3d.execute(
"points", gridx_ref[0], gridy_ref[0], gridz_ref[0], backend="vectorized"
)
assert k.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
k3d.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
np.array([1.0]),
backend="loop",
)
k, ss = k3d.execute(
"points", gridx_ref[0], gridy_ref[0], gridz_ref[0], backend="loop"
)
assert k.shape == (1,)
assert ss.shape == (1,)
data = np.zeros((125, 4))
z, y, x = np.meshgrid(
np.arange(0.0, 5.0, 1.0), np.arange(0.0, 5.0, 1.0), np.arange(0.0, 5.0, 1.0)
)
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(z)
data[:, 3] = np.ravel(z)
k3d = OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"grid",
np.arange(2.0, 3.0, 0.1),
np.arange(2.0, 3.0, 0.1),
np.arange(0.0, 4.0, 1.0),
backend="vectorized",
)
assert_allclose(k[0, :, :], 0.0, atol=0.01)
assert_allclose(k[1, :, :], 1.0, rtol=1.0e-2)
assert_allclose(k[2, :, :], 2.0, rtol=1.0e-2)
assert_allclose(k[3, :, :], 3.0, rtol=1.0e-2)
k, ss = k3d.execute(
"grid",
np.arange(2.0, 3.0, 0.1),
np.arange(2.0, 3.0, 0.1),
np.arange(0.0, 4.0, 1.0),
backend="loop",
)
assert_allclose(k[0, :, :], 0.0, atol=0.01)
assert_allclose(k[1, :, :], 1.0, rtol=1.0e-2)
assert_allclose(k[2, :, :], 2.0, rtol=1.0e-2)
assert_allclose(k[3, :, :], 3.0, rtol=1.0e-2)
k3d = OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"points",
[2.5, 2.5, 2.5],
[2.5, 2.5, 2.5],
[1.0, 2.0, 3.0],
backend="vectorized",
)
assert_allclose(k[0], 1.0, atol=0.01)
assert_allclose(k[1], 2.0, rtol=1.0e-2)
assert_allclose(k[2], 3.0, rtol=1.0e-2)
k, ss = k3d.execute(
"points", [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1.0, 2.0, 3.0], backend="loop"
)
assert_allclose(k[0], 1.0, atol=0.01)
assert_allclose(k[1], 2.0, rtol=1.0e-2)
assert_allclose(k[2], 3.0, rtol=1.0e-2)
def test_uk3d_execute(sample_data_3d):
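    # Same execute() coverage as test_ok3d_execute, for UniversalKriging3D.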
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
uk3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3])
with pytest.raises(ValueError):
uk3d.execute("blurg", gridx_ref, gridy_ref, gridz_ref)
k, ss = uk3d.execute("grid", gridx_ref, gridy_ref, gridz_ref, backend="vectorized")
shape = (gridz_ref.size, gridy_ref.size, gridx_ref.size)
assert k.shape == shape
assert ss.shape == shape
assert np.amax(k) != np.amin(k)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(k)
k, ss = uk3d.execute("grid", gridx_ref, gridy_ref, gridz_ref, backend="loop")
shape = (gridz_ref.size, gridy_ref.size, gridx_ref.size)
assert k.shape == shape
assert ss.shape == shape
assert np.amax(k) != np.amin(k)
assert np.amax(ss) != np.amin(ss)
assert not np.ma.is_masked(k)
with pytest.raises(IOError):
uk3d.execute("masked", gridx_ref, gridy_ref, gridz_ref, backend="vectorized")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask, backend="vectorized"
)
k, ss = uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref, backend="vectorized"
)
assert np.ma.is_masked(k)
assert np.ma.is_masked(ss)
assert k[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
z, ss = uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref.T, backend="vectorized"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
with pytest.raises(IOError):
uk3d.execute("masked", gridx_ref, gridy_ref, gridz_ref, backend="loop")
mask = np.array([True, False])
with pytest.raises(ValueError):
uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask, backend="loop"
)
k, ss = uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref, backend="loop"
)
assert np.ma.is_masked(k)
assert np.ma.is_masked(ss)
assert k[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
z, ss = uk3d.execute(
"masked", gridx_ref, gridy_ref, gridz_ref, mask=mask_ref.T, backend="loop"
)
assert np.ma.is_masked(z)
assert np.ma.is_masked(ss)
assert z[0, 0, 0] is np.ma.masked
assert ss[0, 0, 0] is np.ma.masked
with pytest.raises(ValueError):
uk3d.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
np.array([1.0]),
backend="vectorized",
)
k, ss = uk3d.execute(
"points", gridx_ref[0], gridy_ref[0], gridz_ref[0], backend="vectorized"
)
assert k.shape == (1,)
assert ss.shape == (1,)
with pytest.raises(ValueError):
uk3d.execute(
"points",
np.array([0.0, 1.0, 2.0]),
np.array([0.0, 1.0]),
np.array([1.0]),
backend="loop",
)
k, ss = uk3d.execute(
"points", gridx_ref[0], gridy_ref[0], gridz_ref[0], backend="loop"
)
assert k.shape == (1,)
assert ss.shape == (1,)
data = np.zeros((125, 4))
z, y, x = np.meshgrid(
np.arange(0.0, 5.0, 1.0), np.arange(0.0, 5.0, 1.0), np.arange(0.0, 5.0, 1.0)
)
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(z)
data[:, 3] = np.ravel(z)
k3d = UniversalKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"grid",
np.arange(2.0, 3.0, 0.1),
np.arange(2.0, 3.0, 0.1),
np.arange(0.0, 4.0, 1.0),
backend="vectorized",
)
assert_allclose(k[0, :, :], 0.0, atol=0.01)
assert_allclose(k[1, :, :], 1.0, rtol=1.0e-2)
assert_allclose(k[2, :, :], 2.0, rtol=1.0e-2)
assert_allclose(k[3, :, :], 3.0, rtol=1.0e-2)
k, ss = k3d.execute(
"grid",
np.arange(2.0, 3.0, 0.1),
np.arange(2.0, 3.0, 0.1),
np.arange(0.0, 4.0, 1.0),
backend="loop",
)
assert_allclose(k[0, :, :], 0.0, atol=0.01)
assert_allclose(k[1, :, :], 1.0, rtol=1.0e-2)
assert_allclose(k[2, :, :], 2.0, rtol=1.0e-2)
assert_allclose(k[3, :, :], 3.0, rtol=1.0e-2)
k3d = UniversalKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"points",
[2.5, 2.5, 2.5],
[2.5, 2.5, 2.5],
[1.0, 2.0, 3.0],
backend="vectorized",
)
assert_allclose(k[0], 1.0, atol=0.01)
assert_allclose(k[1], 2.0, rtol=1.0e-2)
assert_allclose(k[2], 3.0, rtol=1.0e-2)
k, ss = k3d.execute(
"points", [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1.0, 2.0, 3.0], backend="loop"
)
assert_allclose(k[0], 1.0, atol=0.01)
assert_allclose(k[1], 2.0, rtol=1.0e-2)
assert_allclose(k[2], 3.0, rtol=1.0e-2)
def test_force_exact_3d(sample_data_3d):
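    # Exactness in 3D: nodes that fall on data locations should get zero
    # kriging variance while the remaining nodes do not, for both OK3D and
    # UK3D and both backends.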
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
k3d = OrdinaryKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"grid", [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend="vectorized"
)
assert k[2, 0, 0] == approx(0.9)
assert ss[2, 0, 0] == approx(0.0)
assert k[0, 2, 0] == approx(0.9)
assert ss[0, 2, 0] == approx(0.0)
assert k[1, 2, 2] == approx(0.7)
assert ss[1, 2, 2] == approx(0.0)
assert ss[2, 2, 2] != approx(0.0)
assert ss[0, 0, 0] != approx(0.0)
k, ss = k3d.execute(
"grid", [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend="loop"
)
assert k[2, 0, 0] == approx(0.9)
assert ss[2, 0, 0] == approx(0.0)
assert k[0, 2, 0] == approx(0.9)
assert ss[0, 2, 0] == approx(0.0)
assert k[1, 2, 2] == approx(0.7)
assert ss[1, 2, 2] == approx(0.0)
assert ss[2, 2, 2] != approx(0.0)
assert ss[0, 0, 0] != approx(0.0)
k3d = UniversalKriging3D(
data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model="linear"
)
k, ss = k3d.execute(
"grid", [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend="vectorized"
)
assert k[2, 0, 0] == approx(0.9)
assert ss[2, 0, 0] == approx(0.0)
assert k[0, 2, 0] == approx(0.9)
assert ss[0, 2, 0] == approx(0.0)
assert k[1, 2, 2] == approx(0.7)
assert ss[1, 2, 2] == approx(0.0)
assert ss[2, 2, 2] != approx(0.0)
assert ss[0, 0, 0] != approx(0.0)
k, ss = k3d.execute(
"grid", [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], backend="loop"
)
assert k[2, 0, 0] == approx(0.9)
assert ss[2, 0, 0] == approx(0.0)
assert k[0, 2, 0] == approx(0.9)
assert ss[0, 2, 0] == approx(0.0)
assert k[1, 2, 2] == approx(0.7)
assert ss[1, 2, 2] == approx(0.0)
assert ss[2, 2, 2] != approx(0.0)
assert ss[0, 0, 0] != approx(0.0)
def test_uk3d_specified_drift(sample_data_3d):
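    # Specified drift arrays built from the x/y/z coordinates must reproduce
    # the built-in regional_linear drift in 3D, and malformed specifications
    # must raise.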
data, (gridx_ref, gridy_ref, gridz_ref), mask_ref = sample_data_3d
zg, yg, xg = np.meshgrid(gridz_ref, gridy_ref, gridx_ref, indexing="ij")
with pytest.raises(ValueError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["specified"],
)
with pytest.raises(TypeError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=data[:, 0],
)
with pytest.raises(ValueError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:2, 0]],
)
uk_spec = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["specified"],
specified_drift=[data[:, 0], data[:, 1], data[:, 2]],
)
with pytest.raises(ValueError):
uk_spec.execute(
"grid",
gridx_ref,
gridy_ref,
gridz_ref,
specified_drift_arrays=[gridx_ref, gridy_ref, gridz_ref],
)
with pytest.raises(TypeError):
uk_spec.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, specified_drift_arrays=gridx_ref
)
with pytest.raises(ValueError):
uk_spec.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, specified_drift_arrays=[zg]
)
z_spec, ss_spec = uk_spec.execute(
"grid", gridx_ref, gridy_ref, gridz_ref, specified_drift_arrays=[xg, yg, zg]
)
uk_lin = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx_ref, gridy_ref, gridz_ref)
assert_allclose(z_spec, z_lin)
assert_allclose(ss_spec, ss_lin)
def test_uk3d_functional_drift(sample_data_3d):
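    # Functional drifts given as callables of (x, y, z) should match the
    # built-in regional_linear drift.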
data, (gridx, gridy, gridz), mask_ref = sample_data_3d
func_x = lambda x, y, z: x # noqa
func_y = lambda x, y, z: y # noqa
func_z = lambda x, y, z: z # noqa
with pytest.raises(ValueError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["functional"],
)
with pytest.raises(TypeError):
UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=func_x,
)
uk_func = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["functional"],
functional_drift=[func_x, func_y, func_z],
)
z_func, ss_func = uk_func.execute("grid", gridx, gridy, gridz)
uk_lin = UniversalKriging3D(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_model="linear",
drift_terms=["regional_linear"],
)
z_lin, ss_lin = uk_lin.execute("grid", gridx, gridy, gridz)
assert_allclose(z_func, z_lin)
assert_allclose(ss_func, ss_lin)
def test_geometric_code():
# Create selected points distributed across the sphere:
N = 4
lon = np.array([7.0, 7.0, 187.0, 73.231])
lat = np.array([13.23, 13.2301, -13.23, -79.3])
    # For the reference points above, the distance matrix has been calculated
    # using geopy (v. 1.11.0) as follows:
# >>> from geopy.distance import great_circle
# >>> g = great_circle(radius=1.0)
# >>> d = np.zeros((N,N), dtype=float)
# >>> for i in range(N):
# >>> for j in range(N):
# >>> d[i,j] = g.measure((lat[i],lon[i]),(lat[j],lon[j]))
# >>> d *= 180.0/np.pi
# From that distance matrix, the reference values have been obtained.
d_ref = np.array(
[
[0.0, 1e-4, 180.0, 98.744848317171801],
[1e-4, 0.0, 179.9999, 98.744946828324345],
[180.0, 179.9999, 0.0, 81.255151682828213],
[98.744848317171801, 98.744946828324345, 81.255151682828213, 0.0],
]
)
# Calculate distance matrix using the PyKrige code:
d = np.zeros((N, N))
for i in range(N):
for j in range(N):
d[i, j] = core.great_circle_distance(lon[i], lat[i], lon[j], lat[j])
    # Test against reference values:
assert_allclose(d, d_ref)
# Test general features:
assert_allclose(d[np.eye(N, dtype=bool)], 0.0)
np.testing.assert_equal(d >= 0.0, np.ones((N, N), dtype=bool))
assert_allclose(d, d.T)
np.testing.assert_equal(d <= 180.0, np.ones((N, N), dtype=bool))
# Test great_circle_distance and euclid3_to_great_circle against each other
lon_ref = lon
lat_ref = lat
for i in range(len(lon_ref)):
lon, lat = np.meshgrid(np.linspace(0, 360.0, 20), np.linspace(-90.0, 90.0, 20))
dx = np.cos(np.pi / 180.0 * lon) * np.cos(np.pi / 180.0 * lat) - np.cos(
np.pi / 180.0 * lon_ref[i]
) * np.cos(np.pi / 180.0 * lat_ref[i])
dy = np.sin(np.pi / 180.0 * lon) * np.cos(np.pi / 180.0 * lat) - np.sin(
np.pi / 180.0 * lon_ref[i]
) * np.cos(np.pi / 180.0 * lat_ref[i])
dz = np.sin(np.pi / 180.0 * lat) - np.sin(np.pi / 180.0 * lat_ref[i])
assert_allclose(
core.great_circle_distance(lon_ref[i], lat_ref[i], lon, lat),
core.euclid3_to_great_circle(np.sqrt(dx**2 + dy**2 + dz**2)),
rtol=1e-5,
)
def test_ok_geographic():
# Generate random data:
np.random.seed(89239413)
lon = 360.0 * np.random.rand(50, 1)
lat = 180.0 * np.random.rand(50, 1) - 90.0
z = np.random.rand(50, 1)
# Generate grid:
grid_lon = 360.0 * np.random.rand(120, 1)
grid_lat = 180.0 * np.random.rand(120, 1) - 90.0
# Create ordinary kriging object:
OK = OrdinaryKriging(
lon,
lat,
z,
variogram_model="linear",
verbose=False,
enable_plotting=False,
coordinates_type="geographic",
)
# Execute on grid:
z, ss = OK.execute("grid", grid_lon, grid_lat)
def test_ok_geographic_vs_euclid():
# Generate some random data close to the north pole.
# Then we use a polar projected 2d euclidean coordinate
# system and compare the kriging results in that coordinate
# system with the geographic-option results.
# If data point distance to the north pole is small enough
# (choose maximum 0.01 degrees), the differences due to curvature
# should be negligible.
np.random.seed(89239413)
from_north = 1e-2 * np.random.random(5)
lat = 90.0 - from_north
lon = 360.0 * np.random.random(5)
z = np.random.random(5)
z -= z.mean()
x = from_north * np.cos(np.deg2rad(lon))
y = from_north * np.sin(np.deg2rad(lon))
# Generate grids:
grid_lon = 360.0 * np.linspace(0, 1, 50)
grid_from_north = np.linspace(0, 0.01, 10)
grid_lat = 90.0 - grid_from_north
grid_x = grid_from_north[:, np.newaxis] * np.cos(
np.deg2rad(grid_lon[np.newaxis, :])
)
grid_y = grid_from_north[:, np.newaxis] * np.sin(
np.deg2rad(grid_lon[np.newaxis, :])
)
grid_lon, grid_lat = np.meshgrid(grid_lon, grid_lat, indexing="xy")
# Flatten the grids:
grid_x = grid_x.flatten()
grid_y = grid_y.flatten()
grid_lon = grid_lon.flatten()
grid_lat = grid_lat.flatten()
    # Calculate and compare distance matrices to ensure that this part
# of the workflow works as intended (tested: 2e-9 is currently the
# match for this setup):
d_eucl = cdist(
np.concatenate([x[:, np.newaxis], y[:, np.newaxis]], axis=1),
np.concatenate([grid_x[:, np.newaxis], grid_y[:, np.newaxis]], axis=1),
)
d_geo = core.great_circle_distance(
lon[:, np.newaxis],
lat[:, np.newaxis],
grid_lon[np.newaxis, :],
grid_lat[np.newaxis, :],
)
assert_allclose(d_eucl, d_geo, rtol=2e-9)
# Create ordinary kriging objects:
OK_geo = OrdinaryKriging(
lon,
lat,
z,
variogram_model="linear",
verbose=False,
enable_plotting=False,
coordinates_type="geographic",
)
OK_xy = OrdinaryKriging(
x, y, z, variogram_model="linear", verbose=False, enable_plotting=False
)
OK_wrong = OrdinaryKriging(
lon, lat, z, variogram_model="linear", verbose=False, enable_plotting=False
)
# Execute on grid:
zgeo, ss = OK_geo.execute("points", grid_lon, grid_lat)
zxy, ss = OK_xy.execute("points", grid_x, grid_y)
zwrong, ss = OK_wrong.execute("points", grid_lon, grid_lat)
# Assert equivalence / difference (tested: 2e-5 is currently the
# match for this setup):
assert_allclose(zgeo, zxy, rtol=2e-5)
assert not np.any(zgeo == 0)
assert np.abs((zgeo - zwrong) / zgeo).max() > 1.0
def test_ok_geometric_closest_points():
# Generate random data:
np.random.seed(89239413)
lon = 360.0 * np.random.rand(50, 1)
lat = 180.0 * np.random.rand(50, 1) - 90.0
z = np.random.rand(50, 1)
# Generate grid:
grid_lon = 360.0 * np.random.rand(120, 1)
grid_lat = 180.0 * np.random.rand(120, 1) - 90.0
# Create ordinary kriging object:
OK = OrdinaryKriging(
lon,
lat,
z,
variogram_model="linear",
verbose=False,
enable_plotting=False,
coordinates_type="geographic",
)
# Execute on grid:
with pytest.raises(ValueError):
        # Test OK raising ValueError when n_closest_points == 1:
z, ss = OK.execute("grid", grid_lon, grid_lat, n_closest_points=1, backend="C")
z, ss = OK.execute("grid", grid_lon, grid_lat, n_closest_points=5, backend="C")
@pytest.mark.parametrize("model", [OrdinaryKriging, UniversalKriging])
def test_gstools_variogram(model):
gstools = pytest.importorskip("gstools")
# test data
data = np.array(
[
[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74],
]
)
gridx = np.arange(0.0, 5.5, 0.1)
gridy = np.arange(0.0, 6.5, 0.1)
# a GSTools based covariance model
cov_model = gstools.Gaussian(
dim=2, len_scale=1, anis=0.2, angles=-0.5, var=0.5, nugget=0.1
)
# create the krige field
krige = model(data[:, 0], data[:, 1], data[:, 2], cov_model)
z1, ss1 = krige.execute("grid", gridx, gridy)
# check if the field coincides with the data
for i in range(5):
y_id = int(data[i, 1] * 10)
x_id = int(data[i, 0] * 10)
assert np.isclose(z1[y_id, x_id], data[i, 2])
@pytest.mark.parametrize("model", [OrdinaryKriging, UniversalKriging])
def test_pseudo_2d(model):
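    # Duplicate sample locations make the kriging matrix singular; with
    # pseudo_inv=True the solver should still return the mean of the
    # coincident observations.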
# test data
data = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 3.0], [1.0, 0.0, 6.0]])
for p_type in ["pinv", "pinvh"]:
# create the krige field
krige = model(
data[:, 0],
data[:, 1],
data[:, 2],
variogram_parameters=[1.0, 0.0],
pseudo_inv=True,
pseudo_inv_type=p_type,
)
z1, ss1 = krige.execute("points", 0.0, 0.0)
# check if the field coincides with the mean of the redundant data
assert np.isclose(z1.item(), 2.0)
@pytest.mark.parametrize("model", [OrdinaryKriging3D, UniversalKriging3D])
def test_pseudo_3d(model):
# test data
data = np.array([[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 3.0], [1.0, 0.0, 0.0, 6.0]])
for p_type in ["pinv", "pinvh"]:
# create the krige field
krige = model(
data[:, 0],
data[:, 1],
data[:, 2],
data[:, 3],
variogram_parameters=[1.0, 0.0],
pseudo_inv=True,
pseudo_inv_type=p_type,
)
z1, ss1 = krige.execute("points", 0.0, 0.0, 0.0)
# check if the field coincides with the mean of the redundant data
assert np.isclose(z1.item(), 2.0)
| 96,612 | 31.344493 | 88 | py |
PyKrige | PyKrige-main/tests/test_api.py | from itertools import product
import numpy as np
import pytest
from pykrige.compat import Krige, threed_krige
def _method_and_variogram():
method = ["ordinary", "universal", "ordinary3d", "universal3d"]
variogram_model = ["linear", "power", "gaussian", "spherical", "exponential"]
return product(method, variogram_model)
def test_krige():
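    # Grid-search the Krige scikit-learn wrapper over every method /
    # variogram_model combination and sanity-check the resulting scores.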
# dummy data
pytest.importorskip("sklearn")
from sklearn.model_selection import GridSearchCV
np.random.seed(1)
X = np.random.randint(0, 400, size=(20, 3)).astype(float)
y = 5 * np.random.rand(20)
    for m, v in _method_and_variogram():
param_dict = {"method": [m], "variogram_model": [v]}
estimator = GridSearchCV(
Krige(),
param_dict,
n_jobs=-1,
pre_dispatch="2*n_jobs",
verbose=False,
return_train_score=True,
cv=5,
)
# run the gridsearch
if m in ["ordinary", "universal"]:
estimator.fit(X=X[:, :2], y=y)
else:
estimator.fit(X=X, y=y)
if hasattr(estimator, "best_score_"):
if m in threed_krige:
assert estimator.best_score_ > -10.0
else:
assert estimator.best_score_ > -3.0
if hasattr(estimator, "cv_results_"):
assert estimator.cv_results_["mean_train_score"] > 0
| 1,394 | 28.0625 | 81 | py |
PyKrige | PyKrige-main/tests/test_classification_krige.py | from itertools import product
import numpy as np
import pytest
from pykrige.ck import ClassificationKriging
try:
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.svm import SVC
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
def _methods():
krige_methods = ["ordinary", "universal"]
ml_methods = [
SVC(C=0.01, gamma="auto", probability=True),
RandomForestClassifier(n_estimators=50),
]
return product(ml_methods, krige_methods)
@pytest.mark.skipif(not SKLEARN_INSTALLED, reason="requires scikit-learn")
def test_classification_krige():
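    # Fit ClassificationKriging on a synthetic, binned target with lat/lon
    # coordinates and require a held-out score above 0.25 for each
    # classifier / kriging-method combination.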
np.random.seed(1)
x = np.linspace(-1.0, 1.0, 100)
# create a feature matrix with 5 features
X = np.tile(x, reps=(5, 1)).T
y = (
1
+ 5 * X[:, 0]
- 2 * X[:, 1]
- 2 * X[:, 2]
+ 3 * X[:, 3]
+ 4 * X[:, 4]
+ 2 * (np.random.rand(100) - 0.5)
)
# create lat/lon array
lon = np.linspace(-180.0, 180.0, 10)
lat = np.linspace(-90.0, 90.0, 10)
lon_lat = np.array(list(product(lon, lat)))
discretizer = KBinsDiscretizer(encode="ordinal")
y = discretizer.fit_transform(y.reshape(-1, 1))
X_train, X_test, y_train, y_test, lon_lat_train, lon_lat_test = train_test_split(
X, y, lon_lat, train_size=0.7, random_state=10
)
for ml_model, krige_method in _methods():
class_model = ClassificationKriging(
classification_model=ml_model, method=krige_method, n_closest_points=2
)
class_model.fit(X_train, lon_lat_train, y_train)
assert class_model.score(X_test, lon_lat_test, y_test) > 0.25
@pytest.mark.skipif(not SKLEARN_INSTALLED, reason="requires scikit-learn")
def test_krige_classification_housing():
import ssl
import urllib
try:
housing = fetch_california_housing()
except (ssl.SSLError, urllib.error.URLError):
ssl._create_default_https_context = ssl._create_unverified_context
try:
housing = fetch_california_housing()
except PermissionError:
# This can raise permission error on Appveyor
pytest.skip("Failed to load california housing dataset")
ssl._create_default_https_context = ssl.create_default_context
# take only first 1000
p = housing["data"][:1000, :-2]
x = housing["data"][:1000, -2:]
target = housing["target"][:1000]
discretizer = KBinsDiscretizer(encode="ordinal")
target = discretizer.fit_transform(target.reshape(-1, 1))
p_train, p_test, y_train, y_test, x_train, x_test = train_test_split(
p, target, x, train_size=0.7, random_state=10
)
for ml_model, krige_method in _methods():
class_model = ClassificationKriging(
classification_model=ml_model, method=krige_method, n_closest_points=2
)
class_model.fit(p_train, x_train, y_train)
if krige_method == "ordinary":
assert class_model.score(p_test, x_test, y_test) > 0.5
else:
assert class_model.score(p_test, x_test, y_test) > 0.0
| 3,274 | 31.107843 | 85 | py |
PyKrige | PyKrige-main/tests/test_regression_krige.py | from itertools import product
import numpy as np
import pytest
from pykrige.rk import RegressionKriging
try:
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ElasticNet, Lasso, LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
def _methods():
krige_methods = ["ordinary", "universal"]
ml_methods = [
SVR(C=0.01, gamma="auto"),
RandomForestRegressor(min_samples_split=5, n_estimators=50),
LinearRegression(),
Lasso(),
ElasticNet(),
]
return product(ml_methods, krige_methods)
@pytest.mark.skipif(not SKLEARN_INSTALLED, reason="requires scikit-learn")
def test_regression_krige():
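    # Fit RegressionKriging on a synthetic linear target with lat/lon
    # coordinates and require a held-out score above 0.25 for each
    # regression-model / kriging-method combination.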
np.random.seed(1)
x = np.linspace(-1.0, 1.0, 100)
# create a feature matrix with 5 features
X = np.tile(x, reps=(5, 1)).T
y = (
1
+ 5 * X[:, 0]
- 2 * X[:, 1]
- 2 * X[:, 2]
+ 3 * X[:, 3]
+ 4 * X[:, 4]
+ 2 * (np.random.rand(100) - 0.5)
)
# create lat/lon array
lon = np.linspace(-180.0, 180.0, 10)
lat = np.linspace(-90.0, 90.0, 10)
lon_lat = np.array(list(product(lon, lat)))
X_train, X_test, y_train, y_test, lon_lat_train, lon_lat_test = train_test_split(
X, y, lon_lat, train_size=0.7, random_state=10
)
for ml_model, krige_method in _methods():
reg_kr_model = RegressionKriging(
regression_model=ml_model, method=krige_method, n_closest_points=2
)
reg_kr_model.fit(X_train, lon_lat_train, y_train)
assert reg_kr_model.score(X_test, lon_lat_test, y_test) > 0.25
@pytest.mark.skipif(not SKLEARN_INSTALLED, reason="requires scikit-learn")
def test_krige_housing():
import ssl
import urllib
try:
housing = fetch_california_housing()
except (ssl.SSLError, urllib.error.URLError):
ssl._create_default_https_context = ssl._create_unverified_context
try:
housing = fetch_california_housing()
except PermissionError:
# This can raise permission error on Appveyor
pytest.skip("Failed to load california housing dataset")
ssl._create_default_https_context = ssl.create_default_context
# take only first 1000
p = housing["data"][:1000, :-2]
x = housing["data"][:1000, -2:]
target = housing["target"][:1000]
p_train, p_test, y_train, y_test, x_train, x_test = train_test_split(
p, target, x, train_size=0.7, random_state=10
)
for ml_model, krige_method in _methods():
reg_kr_model = RegressionKriging(
regression_model=ml_model, method=krige_method, n_closest_points=2
)
reg_kr_model.fit(p_train, x_train, y_train)
if krige_method == "ordinary":
assert reg_kr_model.score(p_test, x_test, y_test) > 0.5
else:
assert reg_kr_model.score(p_test, x_test, y_test) > 0.0
| 3,107 | 30.08 | 85 | py |
PyKrige | PyKrige-main/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyKrige documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 1 18:34:53 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import shlex
import sys
import matplotlib
import sphinx_rtd_theme
matplotlib.use("Agg")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath("../../"))
sys.path.insert(0, os.path.abspath("sphinxext"))
from github_link import make_linkcode_resolve
import pykrige
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.mathjax",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon", # parameters look better than with numpydoc only
"numpydoc",
"sphinx_gallery.gen_gallery",
"sphinx.ext.linkcode",
"m2r2",
]
autodoc_default_flags = ["members", "inherited-members"]
# autosummaries from source-files
autosummary_generate = True
# dont show __init__ docstring
autoclass_content = "class"
# sort class members
autodoc_member_order = "groupwise"
# autodoc_member_order = 'bysource'
# Notes in boxes
napoleon_use_admonition_for_notes = True
# Attributes like parameters
# napoleon_use_ivar = True
# this is a nice class-doc layout
numpydoc_show_class_members = True
# class members have no separate file, so they are not in a toctree
numpydoc_class_members_toctree = False
# for the covmodels a lot of class members show up...
# maybe switch off with: :no-inherited-members:
numpydoc_show_inherited_class_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = {
".rst": "restructuredtext",
".txt": "restructuredtext",
".md": "markdown",
}
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "contents"
sphinx_gallery_conf = {
# path to your examples scripts
"examples_dirs": "../../examples",
# path where to save gallery generated examples
"gallery_dirs": "examples",
"filename_pattern": "/.*.py",
}
# General information about the project.
curr_year = datetime.datetime.now().year
project = "PyKrige"
copyright = "2017 - {}, PyKrige developers".format(curr_year)
author = "PyKrige developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pykrige.__version__
# The full version, including alpha/beta/rc tags.
release = pykrige.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "PyKrigedoc"
html_logo = "pics/PyKrige_150.png"
html_favicon = "pics/PyKrige.ico"
# -- Options for LaTeX output ---------------------------------------------
latex_logo = "pics/PyKrige_150.png"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "PyKrige.tex", "PyKrige Documentation", "PyKrige developers", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pykrige", "PyKrige Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"PyKrige",
"PyKrige Documentation",
author,
"PyKrige",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve(
"pykrige",
"https://github.com/GeoStat-Framework/"
"PyKrige/blob/{revision}/"
"{package}/{path}#L{lineno}",
)
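# The resolver above turns the "[source]" links of documented objects into GitHub URLs that
# point at the matching file and line, pinned to the git revision the docs were built from.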
| 11,242 | 30.940341 | 88 | py |
PyKrige | PyKrige-main/docs/source/sphinxext/github_link.py | # Adapted from scikit learn
import inspect
import os
import subprocess
import sys
from functools import partial
from operator import attrgetter
REVISION_CMD = "git rev-parse --short HEAD"
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print("Failed to execute git to get revision")
return None
return revision.decode("utf-8")
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ("py", "pyx"):
return
if not info.get("module") or not info.get("fullname"):
return
class_name = info["fullname"].split(".")[0]
if type(class_name) != str:
# Python 2 only
class_name = class_name.encode("utf-8")
module = __import__(info["module"], fromlist=[class_name])
obj = attrgetter(info["fullname"])(module)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ""
return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
"""Returns a linkcode_resolve function for the given URL format
revision is a git commit reference (hash or name)
package is the name of the root module of the package
url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
'blob/{revision}/{package}/'
'{path}#L{lineno}')
"""
revision = _get_git_revision()
return partial(
_linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt
)
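# Rough usage sketch: sphinx.ext.linkcode calls the returned resolver for every documented
# object, passing the domain plus module/fullname info, e.g.
#   resolve = make_linkcode_resolve(
#       "pykrige",
#       "https://github.com/USER/PROJECT/blob/{revision}/{package}/{path}#L{lineno}",
#   )
#   resolve("py", {"module": "pykrige.ok", "fullname": "OrdinaryKriging"})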
| 2,645 | 29.767442 | 85 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/constants.py | num_initial_random_draws = 5
num_gradient_updates = 1000 | 56 | 27.5 | 28 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/benchmark_example.py | import logging
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
from blackbox import BlackboxOffline
from blackbox.load_utils import evaluation_split_from_task
from optimizer.benchmark import benchmark
from optimizer.gaussian_process import GP
from optimizer.random_search import RS
from optimizer.thompson_sampling_functional_prior import TS
if __name__ == '__main__':
task = "electricity"
logging.basicConfig(level=logging.INFO)
num_seeds = 20
num_evaluations = 50
Xys_train, (X_test, y_test) = evaluation_split_from_task(task)
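    # Xys_train is assumed to be a list of (X, y) offline evaluations, one per related task,
    # while (X_test, y_test) holds the precomputed evaluations of the held-out target task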
candidates = X_test
blackbox = BlackboxOffline(
X=X_test,
y=y_test,
)
optimizers = {
#"GP + prior": partial(G3P, normalization="standard"),
#"GCP + prior": partial(G3P, normalization="gaussian"),
"RS": RS,
"TS": TS,
"GP": GP,
}
res = {}
for name, Optimizer_cls in optimizers.items():
logging.info(f"evaluate {name}")
optimizer_factory = partial(
Optimizer_cls,
input_dim=blackbox.input_dim,
output_dim=blackbox.output_dim,
evaluations_other_tasks=Xys_train,
)
X, y = benchmark(
optimizer_factory=optimizer_factory,
blackbox=blackbox,
candidates=candidates,
num_seeds=num_seeds,
num_evaluations=num_evaluations,
verbose=False,
)
res[name] = X, y
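        # judging by the aggregation below, y is assumed to stack the runs as
        # (num_seeds, num_evaluations, output_dim), with X the corresponding evaluated candidates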
print(res)
fig, ax = plt.subplots()
for name, (X, y) in res.items():
y_best = np.minimum.accumulate(y, axis=1)
mean = y_best.mean(axis=0)[:, 0]
std = y_best.std(axis=0)[:, 0]
ax.plot(mean, label=name)
ax.fill_between(range(len(mean)), mean - std, mean + std, alpha=0.2)
plt.legend()
    # save before showing: with interactive backends, savefig after show can write an empty figure
    plt.savefig(f"optimizer-comparison-{task}.pdf")
    plt.show()
| 1,894 | 27.283582 | 76 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/__init__.py | 0 | 0 | 0 | py |
|
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/misc/artificial_data.py | import numpy as np
def artificial_task1(
input_dim: int = 2,
num_train_examples: int = 10000,
num_tasks: int = 5,
seed: int = 0,
):
    # blackboxes are quadratic functions whose centers are sampled uniformly in a small box around [0.5, ..., 0.5]
np.random.seed(seed)
centers = (np.random.rand(num_tasks, input_dim) - 0.5) * 0.25 + 0.5
Xys = []
for x_star in centers:
X = np.random.rand(num_train_examples, input_dim)
y = np.square((X - x_star)).mean(axis=-1, keepdims=True)
Xys.append((X, y))
Xy_train = Xys[1:]
X_test, y_test = Xys[0]
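    # the first blackbox is held out as the target task, the remaining ones act as related tasks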
return Xy_train, X_test, y_test
def artificial_task2(
input_dim: int = 2,
num_train_examples: int = 10000,
num_tasks: int = 5,
seed: int = 0,
):
# blackboxes are quadratic functions whose centers are either [0.25, ..., 0.25] or [0.75, ..., 0.75]
    # this is a task that requires adaptation, so TS should be about as good as RS and outperformed by GP and GP3
# GP2 and GP3 should have the same performance
np.random.seed(seed)
sign = 2 * (np.random.randint(low=0, high=2, size=num_tasks) - 0.5)
# the first sign is set to 1 so that there is prior knowledge
sign[0] = 1
center = np.ones(input_dim) * 0.5
shift = 0.25 * (np.ones(input_dim).reshape(1, -1) * sign.reshape(-1, 1))
centers = center + shift
Xys = []
for x_star in centers:
X = np.random.rand(num_train_examples, input_dim)
y = np.square((X - x_star)).mean(axis=-1, keepdims=True)
Xys.append((X, y))
Xy_train = Xys[1:]
X_test, y_test = Xys[0]
return Xy_train, X_test, y_test
if __name__ == '__main__':
artificial_task2() | 1,709 | 31.884615 | 112 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/misc/__init__.py | import random
import numpy as np
import torch
def set_seed(seed: int):
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
| 149 | 12.636364 | 27 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/prior/mlp_pytorch.py | import tempfile
import uuid
from pathlib import Path
from typing import Optional, Tuple
from sklearn.preprocessing import StandardScaler
from constants import num_gradient_updates
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader, TensorDataset
from prior import Prior
def train(
module,
X_train: np.array,
y_train: np.array,
num_gradient_updates: int = num_gradient_updates,
lr: float = 1e-2,
num_decays: int = 3,
factor_decay: float = 5.0,
batch_size: int = 64,
clip_gradient: Optional[float] = None,
optimizer=None,
early_stopping: bool = True,
):
dataset = TensorDataset(
torch.Tensor(X_train),
torch.Tensor(y_train)
)
# keep 10% of train dataset as validation
num_train = len(dataset) * 9 // 10
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [num_train, len(dataset) - num_train])
# dont use gpu for now
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# module = module.to(device)
def infinite_stream():
while True:
# reshuffle
dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
for data in dataloader:
yield data
train_losses = []
val_rmses = []
first = True
if optimizer is None:
optimizer = torch.optim.Adam(module.parameters(), lr=lr)
checkpoint_freq = 100
it = 0
best_val_rmse = float("inf")
checkpoint_path = Path(tempfile.gettempdir()) / f"best-model-{uuid.uuid4().hex}.pth"
with torch.autograd.set_detect_anomaly(True):
for _ in range(num_decays):
with tqdm(infinite_stream(), total=num_gradient_updates, miniters=200, mininterval=2) as tqdm_iter:
for X_batch, y_batch in tqdm_iter:
optimizer.zero_grad()
                    # both of shape (batch_size, output_dim); we could also fit a covariance
                    # matrix to account for the dependency between the different criteria
mu, sigma = module(X_batch)
distr = torch.distributions.normal.Normal(loc=mu, scale=sigma)
# y_batch has shape (batch_size, output_dim)
loss = - distr.log_prob(y_batch).mean()
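                    # Gaussian negative log-likelihood: the network is trained to predict both
                    # the mean and the (heteroscedastic) standard deviation of the target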
loss.backward()
loss_value = loss.item()
if clip_gradient is not None:
nn.utils.clip_grad_norm_(
module.parameters(),
max_norm=clip_gradient
)
if first:
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(
"\n".join(f"{name}: shape {p.shape}, {p.numel()} parameters" for name, p in
module.named_parameters() if p.requires_grad)
)
print(f"number of parameters: {count_parameters(module)}")
first = False
# print(loss_value)
train_losses.append(loss_value)
optimizer.step()
metrics_dict = {
"train_loss": loss_value,
}
if it % checkpoint_freq == 0:
for X_val, y_val in DataLoader(val_dataset, batch_size=len(val_dataset)):
# compute mean
mu, sigma = module(X_val)
val_rmse = ((mu - y_val) ** 2).mean().sqrt().item()
metrics_dict['val_rmse'] = val_rmse
val_rmses.append(val_rmse)
if early_stopping and val_rmse < best_val_rmse:
# print(f" found better loss {val_rmse} than {best_val_rmse}, checkpointing in {checkpoint_path}")
best_val_rmse = min(best_val_rmse, val_rmse)
torch.save(module.state_dict(), checkpoint_path)
tqdm_iter.set_postfix(metrics_dict)
it += 1
if it % num_gradient_updates == 0:
break
lr /= factor_decay
if early_stopping:
print(f"loading best model found at {checkpoint_path} with val_rmse={val_rmse}")
module.load_state_dict(torch.load(checkpoint_path))
return module, (train_losses, val_rmses)
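# Note: train() returns the (optionally best-checkpointed) module together with the recorded
# train losses and validation RMSEs; the learning rate is divided by factor_decay between each
# of the num_decays rounds of num_gradient_updates steps.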
class GaussianRegression(nn.Module):
def __init__(self, input_dim: int, num_layers: int = 3, num_hidden: int = 40, dropout: float = 0.0):
super(GaussianRegression, self).__init__()
layers = [nn.Linear(input_dim, num_hidden)]
for i in range(num_layers):
layers.append(nn.Linear(num_hidden, num_hidden))
layers.append(nn.ReLU())
layers.append(nn.Dropout(dropout))
self.layers = nn.Sequential(*layers)
self.mu_proj = nn.Linear(num_hidden, 1)
self.sigma_proj = nn.Sequential(nn.Linear(num_hidden, 1), nn.Softplus())
def init(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
# use the modules apply function to recursively apply the initialization
self.layers.apply(init)
def forward(self, x):
x_hidden = self.layers(x)
mu = self.mu_proj(x_hidden)
sigma = self.sigma_proj(x_hidden)
return mu, sigma
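# Minimal shape sketch (values are illustrative):
#   net = GaussianRegression(input_dim=2)
#   mu, sigma = net(torch.rand(8, 2))  # both of shape (8, 1); sigma > 0 thanks to the Softplus head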
class ParametricPrior(Prior):
def __init__(
self,
X_train: np.array,
y_train: np.array,
num_gradient_updates: int = num_gradient_updates,
dropout: float = 0.1,
num_layers: int = 3,
num_hidden: int = 50,
**train_kwargs
):
super(ParametricPrior, self).__init__(
X_train=X_train,
y_train=y_train,
)
n, dim = X_train.shape
self.scaler = StandardScaler()
module = GaussianRegression(input_dim=dim, num_layers=num_layers, num_hidden=num_hidden, dropout=dropout)
self.module, _ = train(
module=module,
X_train=self.scaler.fit_transform(X_train),
y_train=y_train,
num_gradient_updates=num_gradient_updates,
**train_kwargs
)
def predict(self, X: np.array) -> Tuple[np.array, np.array]:
X_test = torch.Tensor(self.scaler.transform(X))
self.module.eval()
mu, sigma = self.module(X_test)
return mu.detach().numpy(), sigma.detach().numpy()
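# Typical usage (sketch, mirroring prior/benchmark.py): fit on the pooled related-task data,
# then query mean/std estimates for the target task's candidates:
#   prior = ParametricPrior(X_train=X_train, y_train=z_train)
#   mu, sigma = prior.predict(X_candidates)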
| 6,805 | 36.191257 | 126 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/prior/benchmark.py | import numpy as np
import pandas as pd
from blackbox.load_utils import evaluation_split_from_task, tasks
from optimizer.normalization_transforms import from_string
from prior.mlp_pytorch import ParametricPrior
from prior.mlp_sklearn import ParametricPriorSklearn
normalization = "gaussian"
rows = []
#tasks = [
# 'electricity',
# # 'australian',
# #'m4-Hourly',
# #'m4-Daily',
#]
for task in tasks:
Xys_train, (X_test, y_test) = evaluation_split_from_task(task)
X_train = np.concatenate([X for X, y in Xys_train], axis=0)
normalizer = from_string(normalization)
z_train = np.concatenate([normalizer(y).transform(y) for X, y in Xys_train], axis=0)
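    # each task's objective values are passed through that task's own normalizer, so the pooled
    # training targets are (assumed to be) on a comparable scale across tasks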
# y_test is only used for measuring RMSE on the prior as mentioned in the paper
z_test = normalizer(y_test).transform(y_test)
# todo normalization inside prior
prior = ParametricPrior(
X_train=X_train,
y_train=z_train,
num_gradient_updates=2000,
num_decays=2,
num_layers=3,
num_hidden=50,
dropout=0.1,
lr=0.001,
)
mu_pred, sigma_pred = prior.predict(X_test)
rmse = np.sqrt(np.square(mu_pred - z_test).mean())
mae = np.abs(mu_pred - z_test).mean()
row = {"task": task, "rmse": rmse, "mae": mae}
rows.append(row)
print(row)
df = pd.DataFrame(rows)
print(df.to_string()) | 1,360 | 26.22 | 88 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/prior/mlp_sklearn.py | import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
from constants import num_gradient_updates
from prior import Prior
class ParametricPriorSklearn(Prior):
def __init__(
self,
X_train: np.array,
y_train: np.array,
num_gradient_updates: int = num_gradient_updates,
):
self.estimator = MLPRegressor(
activation='relu',
hidden_layer_sizes=(50, 50, 50),
learning_rate='adaptive',
verbose=False,
max_iter=num_gradient_updates,
tol=1e-6,
early_stopping=True,
)
self.scaler = StandardScaler()
X = self.scaler.fit_transform(X_train)
self.estimator.fit(X, y_train.ravel())
def predict(self, X):
X = self.scaler.transform(X)
mu = self.estimator.predict(X).reshape((-1, 1))
sigma = np.ones_like(mu)
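        # MLPRegressor only produces point predictions, so the uncertainty is a constant 1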
return mu, sigma
if __name__ == '__main__':
num_train_examples = 10000
num_test_examples = num_train_examples
dim = 2
num_gradient_updates = 200
lr = 1e-2
def make_random_X_y(num_examples: int, dim: int, noise_std: float):
X = np.random.rand(num_examples, dim)
noise = np.random.normal(scale=noise_std, size=(num_examples, 1))
y = X.sum(axis=-1, keepdims=True) + noise
return X, y
# test that parametric prior can recover a simple linear function for the mean
noise_std = 0.01
X_train, y_train = make_random_X_y(num_examples=num_train_examples, dim=dim, noise_std=noise_std)
prior = ParametricPriorSklearn(
X_train=X_train,
y_train=y_train,
#num_gradient_updates=num_gradient_updates,
#num_decays=2,
# smaller network for UT speed
#num_layers=2,
#num_hidden=20,
#dropout=0.0,
#lr=lr
)
X_test, y_test = make_random_X_y(num_examples=num_test_examples, dim=dim, noise_std=noise_std)
mu_pred, sigma_pred = prior.predict(X_test)
mu_l1_error = np.abs(mu_pred - y_test).mean()
print(mu_l1_error)
assert mu_l1_error < 0.2
| 2,161 | 29.450704 | 101 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/prior/unit_prior.py | from typing import Tuple
import numpy as np
from prior import Prior
class UnitPrior(Prior):
def __init__(
self,
X_train: np.array,
y_train: np.array
):
super(UnitPrior, self).__init__(
X_train=X_train,
y_train=y_train,
)
def predict(self, X_test: np.array) -> Tuple[np.array, np.array]:
return np.zeros_like(X_test[..., 0]), np.ones_like(X_test[..., 1]) | 453 | 22.894737 | 74 | py |