repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
TiKick | TiKick-main/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import os
from setuptools import setup, find_packages
import setuptools
def get_version() -> str:
# https://packaging.python.org/guides/single-sourcing-package-version/
    with open(os.path.join("tmarl", "__init__.py"), "r") as f:
        init = f.read().split()
    return init[init.index("__version__") + 2][1:-1]
setup(
name="tmarl", # Replace with your own username
version=get_version(),
description="marl algorithms",
long_description=open("README.md", encoding="utf8").read(),
long_description_content_type="text/markdown",
author="tmarl",
author_email="[email protected]",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License",
"Operating System :: OS Independent",
],
keywords="multi-agent reinforcement learning algorithms pytorch",
python_requires='>=3.6',
)
| 1,788 | 35.510204 | 74 | py |
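The `get_version()` helper above single-sources the package version from `tmarl/__init__.py`: it whitespace-splits the file, takes the token two places after `__version__` (skipping the `=`), and strips the surrounding quotes. A minimal sketch of that parse (illustrative, not repo code), using the version string defined in the next row:

```python
# Sketch of the token arithmetic in get_version().
tokens = '__version__ = "0.0.3"'.split()   # ['__version__', '=', '"0.0.3"']
version = tokens[tokens.index("__version__") + 2][1:-1]
assert version == "0.0.3"
```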
TiKick | TiKick-main/tmarl/__init__.py | __version__ = "0.0.3" | 21 | 21 | 21 | py |
TiKick | TiKick-main/tmarl/networks/policy_network.py |
import torch
import torch.nn as nn
from tmarl.networks.utils.util import init, check
from tmarl.networks.utils.mlp import MLPBase, MLPLayer
from tmarl.networks.utils.rnn import RNNLayer
from tmarl.networks.utils.act import ACTLayer
from tmarl.networks.utils.popart import PopArt
from tmarl.utils.util import get_shape_from_obs_space
# networks are defined here
class PolicyNetwork(nn.Module):
def __init__(self, args, obs_space, action_space, device=torch.device("cpu")):
super(PolicyNetwork, self).__init__()
self.hidden_size = args.hidden_size
self._gain = args.gain
self._use_orthogonal = args.use_orthogonal
self._activation_id = args.activation_id
self._use_policy_active_masks = args.use_policy_active_masks
self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
self._use_recurrent_policy = args.use_recurrent_policy
self._use_influence_policy = args.use_influence_policy
self._influence_layer_N = args.influence_layer_N
        self._use_policy_vhead = args.use_policy_vhead
        self._use_popart = args.use_popart  # read below when building the value head
        self._recurrent_N = args.recurrent_N
self.tpdv = dict(dtype=torch.float32, device=device)
obs_shape = get_shape_from_obs_space(obs_space)
self._mixed_obs = False
self.base = MLPBase(args, obs_shape, use_attn_internal=False, use_cat_self=True)
input_size = self.base.output_size
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
self.rnn = RNNLayer(input_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
input_size = self.hidden_size
if self._use_influence_policy:
self.mlp = MLPLayer(obs_shape[0], self.hidden_size,
self._influence_layer_N, self._use_orthogonal, self._activation_id)
input_size += self.hidden_size
self.act = ACTLayer(action_space, input_size, self._use_orthogonal, self._gain)
if self._use_policy_vhead:
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0))
if self._use_popart:
self.v_out = init_(PopArt(input_size, 1, device=device))
else:
self.v_out = init_(nn.Linear(input_size, 1))
self.to(device)
def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
actions, action_log_probs = self.act(actor_features, available_actions, deterministic)
return actions, action_log_probs, rnn_states
def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
action = check(action).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
if active_masks is not None:
active_masks = check(active_masks).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features, action, available_actions, active_masks = active_masks if self._use_policy_active_masks else None)
values = self.v_out(actor_features) if self._use_policy_vhead else None
return action_log_probs, dist_entropy, values
def get_policy_values(self, obs, rnn_states, masks):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
values = self.v_out(actor_features)
return values | 5,558 | 41.113636 | 181 | py |
TiKick | TiKick-main/tmarl/networks/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/networks/utils/distributions.py | import torch
import torch.nn as nn
from .util import init
"""
Modify standard PyTorch distributions so they are compatible with this code.
"""
#
# Standardize distribution interfaces
#
# Categorical
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
# Normal
class FixedNormal(torch.distributions.Normal):
def log_probs(self, actions):
return super().log_prob(actions).sum(-1, keepdim=True)
    def entropy(self):
        return super().entropy().sum(-1)
def mode(self):
return self.mean
# Bernoulli
class FixedBernoulli(torch.distributions.Bernoulli):
    def log_probs(self, actions):
        return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)
def entropy(self):
return super().entropy().sum(-1)
def mode(self):
return torch.gt(self.probs, 0.5).float()
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(Categorical, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x, available_actions=None):
x = self.linear(x)
if available_actions is not None:
x[available_actions == 0] = -1e10
return FixedCategorical(logits=x)
class DiagGaussian(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(DiagGaussian, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
self.logstd = AddBias(torch.zeros(num_outputs))
def forward(self, x):
action_mean = self.fc_mean(x)
# An ugly hack for my KFAC implementation.
zeros = torch.zeros(action_mean.size())
if x.is_cuda:
zeros = zeros.cuda()
action_logstd = self.logstd(zeros)
return FixedNormal(action_mean, action_logstd.exp())
class Bernoulli(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(Bernoulli, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedBernoulli(logits=x)
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
| 3,466 | 27.891667 | 86 | py |
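The `Categorical` head above implements action masking by writing a large negative number into unavailable logits before building the distribution, while the `Fixed*` wrappers keep a trailing action dimension so log-probs line up with sampled actions. A small sketch (illustrative values, not repo code):

```python
# Masked-logits sketch for the Categorical head.
import torch

head = Categorical(num_inputs=8, num_outputs=4)
x = torch.randn(2, 8)
avail = torch.tensor([[1, 1, 0, 1], [0, 1, 1, 1]])
dist = head(x, available_actions=avail)
action = dist.sample()             # shape (2, 1): trailing action dim kept
log_p = dist.log_probs(action)     # shape (2, 1)
assert dist.probs[avail == 0].max().item() < 1e-6
```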
TiKick | TiKick-main/tmarl/networks/utils/mlp.py |
import torch.nn as nn
from .util import init, get_clones
class MLPLayer(nn.Module):
def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, activation_id):
super(MLPLayer, self).__init__()
self._layer_N = layer_N
active_func = [nn.Tanh(), nn.ReLU(), nn.LeakyReLU(), nn.ELU()][activation_id]
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
gain = nn.init.calculate_gain(['tanh', 'relu', 'leaky_relu', 'leaky_relu'][activation_id])
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)
self.fc1 = nn.Sequential(
init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size))
self.fc_h = nn.Sequential(init_(
nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size))
self.fc2 = get_clones(self.fc_h, self._layer_N)
def forward(self, x):
x = self.fc1(x)
for i in range(self._layer_N):
x = self.fc2[i](x)
return x
class MLPBase(nn.Module):
def __init__(self, args, obs_shape, use_attn_internal=False, use_cat_self=True):
super(MLPBase, self).__init__()
self._use_feature_normalization = args.use_feature_normalization
self._use_orthogonal = args.use_orthogonal
self._activation_id = args.activation_id
self._use_conv1d = args.use_conv1d
self._stacked_frames = args.stacked_frames
self._layer_N = args.layer_N
self.hidden_size = args.hidden_size
obs_dim = obs_shape[0]
inputs_dim = obs_dim
if self._use_feature_normalization:
self.feature_norm = nn.LayerNorm(obs_dim)
self.mlp = MLPLayer(inputs_dim, self.hidden_size,
self._layer_N, self._use_orthogonal, self._activation_id)
def forward(self, x):
if self._use_feature_normalization:
x = self.feature_norm(x)
x = self.mlp(x)
return x
@property
def output_size(self):
return self.hidden_size | 2,116 | 32.603175 | 98 | py |
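`MLPBase` is an optional `LayerNorm` followed by `MLPLayer` (one input block plus `layer_N` cloned hidden blocks), so its output is always `(batch, hidden_size)`. A hedged shape check with assumed hyperparameters:

```python
# Shape sketch for MLPBase; the args values are assumptions.
from types import SimpleNamespace
import torch

args = SimpleNamespace(use_feature_normalization=True, use_orthogonal=True,
                       activation_id=1, use_conv1d=False, stacked_frames=1,
                       layer_N=2, hidden_size=64)
base = MLPBase(args, obs_shape=(268,))
out = base(torch.randn(5, 268))
assert out.shape == (5, 64) and base.output_size == 64
```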
TiKick | TiKick-main/tmarl/networks/utils/popart.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PopArt(torch.nn.Module):
def __init__(self, input_shape, output_shape, norm_axes=1, beta=0.99999, epsilon=1e-5, device=torch.device("cpu")):
super(PopArt, self).__init__()
self.beta = beta
self.epsilon = epsilon
self.norm_axes = norm_axes
self.tpdv = dict(dtype=torch.float32, device=device)
self.input_shape = input_shape
self.output_shape = output_shape
self.weight = nn.Parameter(torch.Tensor(output_shape, input_shape)).to(**self.tpdv)
self.bias = nn.Parameter(torch.Tensor(output_shape)).to(**self.tpdv)
self.stddev = nn.Parameter(torch.ones(output_shape), requires_grad=False).to(**self.tpdv)
self.mean = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
self.mean_sq = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
self.mean.zero_()
self.mean_sq.zero_()
self.debiasing_term.zero_()
def forward(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
return F.linear(input_vector, self.weight, self.bias)
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
        old_mean, old_stddev = self.mean.clone(), self.stddev.clone()  # snapshot before the in-place updates below
batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
self.mean.mul_(self.beta).add_(batch_mean * (1.0 - self.beta))
self.mean_sq.mul_(self.beta).add_(batch_sq_mean * (1.0 - self.beta))
self.debiasing_term.mul_(self.beta).add_(1.0 * (1.0 - self.beta))
self.stddev = (self.mean_sq - self.mean ** 2).sqrt().clamp(min=1e-4)
self.weight = self.weight * old_stddev / self.stddev
self.bias = (old_stddev * self.bias + old_mean - self.mean) / self.stddev
def debiased_mean_var(self):
debiased_mean = self.mean / self.debiasing_term.clamp(min=self.epsilon)
debiased_mean_sq = self.mean_sq / self.debiasing_term.clamp(min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
return debiased_mean, debiased_var
def normalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
return out
def denormalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
out = out.cpu().numpy()
return out
| 3,796 | 38.968421 | 119 | py |
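`PopArt` keeps debiased running estimates of the target mean and variance; `normalize` and `denormalize` are exact inverses at any point in training. A small round-trip sketch (assumptions: CPU, fresh statistics, where the debiased mean is 0 and the variance clamps to 1e-2):

```python
# Round-trip sketch for PopArt's normalize/denormalize (illustrative values).
import numpy as np
import torch

head = PopArt(input_shape=8, output_shape=1)
values = head(torch.randn(4, 8))              # forward is a plain linear layer
returns = np.random.randn(16, 1).astype(np.float32)
normed = head.normalize(returns)              # (x - mean) / sqrt(var), debiased
recovered = head.denormalize(normed)          # inverse transform, numpy out
assert np.allclose(recovered, returns, atol=1e-4)
```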
TiKick | TiKick-main/tmarl/networks/utils/util.py |
import copy
import numpy as np
import torch
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def check(input):
output = torch.from_numpy(input) if type(input) == np.ndarray else input
return output
| 426 | 21.473684 | 76 | py |
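The `init` helper applies a weight initializer, a bias initializer, and an optional gain to a layer in one call; every network module above funnels its layers through it. A typical call pattern (illustrative):

```python
# Orthogonal weights, zero bias, small gain — as the policy heads above use.
import torch.nn as nn

layer = init(nn.Linear(4, 2),
             nn.init.orthogonal_,
             lambda x: nn.init.constant_(x, 0),
             gain=0.01)
```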
TiKick | TiKick-main/tmarl/networks/utils/act.py |
from .distributions import Bernoulli, Categorical, DiagGaussian
import torch
import torch.nn as nn
class ACTLayer(nn.Module):
def __init__(self, action_space, inputs_dim, use_orthogonal, gain):
super(ACTLayer, self).__init__()
self.multidiscrete_action = False
self.continuous_action = False
self.mixed_action = False
if action_space.__class__.__name__ == "Discrete":
action_dim = action_space.n
self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "Box":
self.continuous_action = True
action_dim = action_space.shape[0]
self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "MultiBinary":
action_dim = action_space.shape[0]
self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "MultiDiscrete":
self.multidiscrete_action = True
action_dims = action_space.high - action_space.low + 1
self.action_outs = []
for action_dim in action_dims:
self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain))
self.action_outs = nn.ModuleList(self.action_outs)
        else:  # mixed: continuous + discrete
            self.mixed_action = True
            continuous_dim = action_space[0].shape[0]
            discrete_dim = action_space[1].n
            self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continuous_dim, use_orthogonal, gain), Categorical(
                inputs_dim, discrete_dim, use_orthogonal, gain)])
def forward(self, x, available_actions=None, deterministic=False):
if self.mixed_action :
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode() if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action.float())
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
elif self.multidiscrete_action:
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode() if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action)
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.cat(action_log_probs, -1)
elif self.continuous_action:
action_logits = self.action_out(x)
actions = action_logits.mode() if deterministic else action_logits.sample()
action_log_probs = action_logits.log_probs(actions)
else:
action_logits = self.action_out(x, available_actions)
actions = action_logits.mode() if deterministic else action_logits.sample()
action_log_probs = action_logits.log_probs(actions)
return actions, action_log_probs
def get_probs(self, x, available_actions=None):
if self.mixed_action or self.multidiscrete_action:
action_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action_prob = action_logit.probs
action_probs.append(action_prob)
action_probs = torch.cat(action_probs, -1)
elif self.continuous_action:
action_logits = self.action_out(x)
action_probs = action_logits.probs
else:
action_logits = self.action_out(x, available_actions)
action_probs = action_logits.probs
return action_probs
def get_log_1mp(self, x, action, available_actions=None, active_masks=None):
action_logits = self.action_out(x, available_actions)
action_prob = torch.gather(action_logits.probs, 1, action.long())
action_prob = torch.clamp(action_prob, 0, 1-1e-6)
action_log_1mp = torch.log(1 - action_prob)
return action_log_1mp
def evaluate_actions(self, x, action, available_actions=None, active_masks=None):
if self.mixed_action:
a, b = action.split((2, 1), -1)
b = b.long()
action = [a, b]
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
if len(action_logit.entropy().shape) == len(active_masks.shape):
dist_entropy.append((action_logit.entropy() * active_masks).sum()/active_masks.sum())
else:
dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum()/active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
dist_entropy = dist_entropy[0] * 0.0025 + dist_entropy[1] * 0.01
elif self.multidiscrete_action:
action = torch.transpose(action, 0, 1)
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
dist_entropy.append((action_logit.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.cat(action_log_probs, -1) # ! could be wrong
dist_entropy = torch.tensor(dist_entropy).mean()
elif self.continuous_action:
action_logits = self.action_out(x)
action_log_probs = action_logits.log_probs(action)
if active_masks is not None:
dist_entropy = (action_logits.entropy()*active_masks).sum()/active_masks.sum()
else:
dist_entropy = action_logits.entropy().mean()
else:
action_logits = self.action_out(x, available_actions)
action_log_probs = action_logits.log_probs(action)
if active_masks is not None:
dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()
else:
dist_entropy = action_logits.entropy().mean()
return action_log_probs, dist_entropy | 7,195 | 46.342105 | 121 | py |
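For a plain `Discrete` space, `ACTLayer` reduces to a single Categorical head: `forward` returns `(batch, 1)` actions with matching log-probabilities, and `evaluate_actions` recomputes them for stored actions. A hedged sketch:

```python
# Discrete-space sketch for ACTLayer (illustrative values).
import gym
import torch

act_layer = ACTLayer(gym.spaces.Discrete(5), inputs_dim=16,
                     use_orthogonal=True, gain=0.01)
x = torch.randn(3, 16)
actions, log_probs = act_layer(x)
log_probs2, entropy = act_layer.evaluate_actions(x, actions)
assert actions.shape == (3, 1) and torch.allclose(log_probs, log_probs2)
```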
TiKick | TiKick-main/tmarl/networks/utils/rnn.py |
import torch
import torch.nn as nn
class RNNLayer(nn.Module):
def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal):
super(RNNLayer, self).__init__()
self._recurrent_N = recurrent_N
self._use_orthogonal = use_orthogonal
self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N)
for name, param in self.rnn.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
if self._use_orthogonal:
nn.init.orthogonal_(param)
else:
nn.init.xavier_uniform_(param)
self.norm = nn.LayerNorm(outputs_dim)
def forward(self, x, hxs, masks):
if x.size(0) == hxs.size(0):
x, hxs = self.rnn(x.unsqueeze(0), (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous())
x = x.squeeze(0)
hxs = hxs.transpose(0, 1)
else:
# x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
has_zeros = ((masks[1:] == 0.0)
.any(dim=-1)
.nonzero()
.squeeze()
.cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
hxs = hxs.transpose(0, 1)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous()
rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp)
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.reshape(T * N, -1)
hxs = hxs.transpose(0, 1)
x = self.norm(x)
return x, hxs
| 2,816 | 34.2125 | 132 | py |
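`RNNLayer` supports two call shapes: rollout mode, where `x` and `hxs` both carry one entry per environment, and training mode, where `x` is a `(T*N, -1)` flattened sequence and the per-step `masks` zero the hidden state at episode boundaries — `forward` then splits the sequence at those zeros and runs the GRU chunk by chunk. A hedged sketch of the training-mode path:

```python
# Training-mode sketch for RNNLayer (illustrative shapes).
import torch

rnn = RNNLayer(inputs_dim=8, outputs_dim=8, recurrent_N=1, use_orthogonal=True)
T, N = 5, 2
x = torch.randn(T * N, 8)              # flattened (T, N, 8)
hxs = torch.zeros(N, 1, 8)             # (N, recurrent_N, hidden)
masks = torch.ones(T * N, 1)
masks[4] = 0.0                         # an episode restart inside the sequence
out, hxs = rnn(x, hxs, masks)
assert out.shape == (T * N, 8)
```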
TiKick | TiKick-main/tmarl/drivers/__init__.py | | 0 | 0 | 0 | py |
TiKick | TiKick-main/tmarl/drivers/shared_distributed/base_driver.py | import numpy as np
import torch
def _t2n(x):
return x.detach().cpu().numpy()
class Driver(object):
def __init__(self, config, client=None):
self.all_args = config['all_args']
self.envs = config['envs']
self.eval_envs = config['eval_envs']
self.device = config['device']
self.num_agents = config['num_agents']
if 'signal' in config:
self.actor_id = config['signal'].actor_id
self.weight_ids = config['signal'].weight_ids
else:
self.actor_id = 0
self.weight_ids = [0]
# parameters
self.env_name = self.all_args.env_name
self.algorithm_name = self.all_args.algorithm_name
self.experiment_name = self.all_args.experiment_name
self.use_centralized_V = self.all_args.use_centralized_V
self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state
self.num_env_steps = self.all_args.num_env_steps if hasattr(self.all_args,'num_env_steps') else self.all_args.eval_num
self.episode_length = self.all_args.episode_length
self.n_rollout_threads = self.all_args.n_rollout_threads
self.learner_n_rollout_threads = self.all_args.n_rollout_threads
self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
self.hidden_size = self.all_args.hidden_size
self.recurrent_N = self.all_args.recurrent_N
# interval
self.save_interval = self.all_args.save_interval
self.use_eval = self.all_args.use_eval
self.eval_interval = self.all_args.eval_interval
self.log_interval = self.all_args.log_interval
# dir
self.model_dir = self.all_args.model_dir
if self.algorithm_name == "rmappo":
from tmarl.algorithms.r_mappo_distributed.mappo_algorithm import MAPPOAlgorithm as TrainAlgo
from tmarl.algorithms.r_mappo_distributed.mappo_module import MAPPOModule as AlgoModule
else:
raise NotImplementedError
if self.envs:
share_observation_space = self.envs.share_observation_space[0] \
if self.use_centralized_V else self.envs.observation_space[0]
# policy network
self.algo_module = AlgoModule(self.all_args,
self.envs.observation_space[0],
share_observation_space,
self.envs.action_space[0],
device=self.device)
else:
share_observation_space = self.eval_envs.share_observation_space[0] \
if self.use_centralized_V else self.eval_envs.observation_space[0]
# policy network
self.algo_module = AlgoModule(self.all_args,
self.eval_envs.observation_space[0],
share_observation_space,
self.eval_envs.action_space[0],
device=self.device)
if self.model_dir is not None:
self.restore()
# algorithm
self.trainer = TrainAlgo(self.all_args, self.algo_module, device=self.device)
# buffer
from tmarl.replay_buffers.normal.shared_buffer import SharedReplayBuffer
self.buffer = SharedReplayBuffer(self.all_args,
self.num_agents,
self.envs.observation_space[0] if self.envs else self.eval_envs.observation_space[0],
share_observation_space,
self.envs.action_space[0] if self.envs else self.eval_envs.action_space[0])
def run(self):
raise NotImplementedError
def warmup(self):
raise NotImplementedError
def collect(self, step):
raise NotImplementedError
def insert(self, data):
raise NotImplementedError
def restore(self):
policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor.pt', map_location=self.device)
self.algo_module.actor.load_state_dict(policy_actor_state_dict)
| 4,244 | 39.04717 | 126 | py |
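`Driver.restore()` expects a single actor checkpoint at `<model_dir>/actor.pt`, saved as a plain `state_dict`. A hedged sketch of that save/load contract — `toy_actor` is a stand-in module, not a TiKick class:

```python
# Checkpoint-contract sketch for Driver.restore() (toy_actor is a stand-in).
import os
import tempfile
import torch
import torch.nn as nn

toy_actor = nn.Linear(4, 2)
model_dir = tempfile.mkdtemp()
torch.save(toy_actor.state_dict(), os.path.join(model_dir, 'actor.pt'))

state = torch.load(os.path.join(model_dir, 'actor.pt'),
                   map_location=torch.device('cpu'))
toy_actor.load_state_dict(state)
```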
TiKick | TiKick-main/tmarl/drivers/shared_distributed/football_driver.py | from tqdm import tqdm
import numpy as np
from tmarl.drivers.shared_distributed.base_driver import Driver
def _t2n(x):
return x.detach().cpu().numpy()
class FootballDriver(Driver):
def __init__(self, config):
super(FootballDriver, self).__init__(config)
def run(self):
self.trainer.prep_rollout()
episodes = int(self.num_env_steps)
total_num_steps = 0
for episode in range(episodes):
print('Episode {}:'.format(episode))
self.eval(total_num_steps)
def eval(self, total_num_steps):
eval_episode_rewards = []
eval_obs, eval_share_obs, eval_available_actions = self.eval_envs.reset()
agent_num = eval_obs.shape[1]
used_buffer = self.buffer
rnn_shape = [self.n_eval_rollout_threads, agent_num, *used_buffer.rnn_states_critic.shape[3:]]
eval_rnn_states = np.zeros(rnn_shape, dtype=np.float32)
eval_rnn_states_critic = np.zeros(rnn_shape, dtype=np.float32)
eval_masks = np.ones((self.n_eval_rollout_threads, agent_num, 1), dtype=np.float32)
finished = None
for eval_step in tqdm(range(3001)):
self.trainer.prep_rollout()
_, eval_action, eval_action_log_prob, eval_rnn_states, _ = \
self.trainer.algo_module.get_actions(np.concatenate(eval_share_obs),
np.concatenate(eval_obs),
np.concatenate(eval_rnn_states),
None,
np.concatenate(eval_masks),
np.concatenate(eval_available_actions),
deterministic=True)
eval_actions = np.array(
np.split(_t2n(eval_action), self.n_eval_rollout_threads))
eval_rnn_states = np.array(
np.split(_t2n(eval_rnn_states), self.n_eval_rollout_threads))
if self.eval_envs.action_space[0].__class__.__name__ == 'Discrete':
eval_actions_env = np.squeeze(
np.eye(self.eval_envs.action_space[0].n)[eval_actions], 2)
else:
raise NotImplementedError
            # Observe reward and next obs
eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, eval_available_actions = \
self.eval_envs.step(eval_actions_env)
eval_rewards = eval_rewards.reshape([-1, agent_num]) # [roll_out, num_agents]
if finished is None:
eval_r = eval_rewards[:,:self.num_agents]
eval_episode_rewards.append(eval_r)
finished = eval_dones.copy()
else:
eval_r = (eval_rewards * ~finished)[:,:self.num_agents]
eval_episode_rewards.append(eval_r)
finished = eval_dones.copy() | finished
eval_masks = np.ones(
(self.n_eval_rollout_threads, agent_num, 1), dtype=np.float32)
eval_masks[eval_dones == True] = np.zeros(
((eval_dones == True).sum(), 1), dtype=np.float32)
eval_rnn_states[eval_dones == True] = np.zeros(
((eval_dones == True).sum(), self.recurrent_N, self.hidden_size), dtype=np.float32)
if finished.all() == True:
break
eval_episode_rewards = np.array(eval_episode_rewards) # [step,rollout,num_agents]
ally_goal = np.sum((eval_episode_rewards == 1), axis=0)
enemy_goal = np.sum((eval_episode_rewards == -1), axis=0)
net_goal = np.sum(eval_episode_rewards, axis=0)
winning_rate = np.mean(net_goal, axis=-1)
eval_env_infos = {}
eval_env_infos['eval_average_winning_rate'] = winning_rate>0
eval_env_infos['eval_average_losing_rate'] = winning_rate<0
eval_env_infos['eval_average_draw_rate'] = winning_rate==0
eval_env_infos['eval_average_ally_score'] = ally_goal
eval_env_infos['eval_average_enemy_score'] = enemy_goal
eval_env_infos['eval_average_net_score'] = net_goal
print("\tSuccess Rate: " + str(np.mean(winning_rate>0)) )
| 4,315 | 42.16 | 102 | py |
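The eval loop above appends per-step reward arrays (masked once an environment has finished) and reads them as goals: +1 for an ally goal, -1 for an enemy goal, so the sign of the summed net score gives win/draw/loss per rollout. A small numeric sketch:

```python
# Scoring-bookkeeping sketch (illustrative numbers): 2 rollouts, 1 agent.
import numpy as np

eval_episode_rewards = np.array([   # [step, rollout, num_agents]
    [[1.0], [0.0]],
    [[0.0], [-1.0]],
    [[1.0], [0.0]],
])
net_goal = np.sum(eval_episode_rewards, axis=0)    # goals scored minus conceded
winning_rate = np.mean(net_goal, axis=-1)          # averaged over agents
assert (winning_rate > 0).tolist() == [True, False]
```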
TiKick | TiKick-main/tmarl/envs/env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
from tmarl.utils.util import tile_images
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class ShareVecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
each observation becomes an batch of observations, and expected action is a batch of actions to
be applied per-environment.
"""
closed = False
viewer = None
metadata = {
'render.modes': ['human', 'rgb_array']
}
def __init__(self, num_envs, observation_space, share_observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.share_observation_space = share_observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob = env.reset()
else:
if np.all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send((ob))
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'get_max_step':
remote.send((env.max_steps))
elif cmd == 'get_action': # for behavior cloning
action = env.get_action()
remote.send((action))
else:
raise NotImplementedError
class SubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def get_max_step(self):
for remote in self.remotes:
remote.send(('get_max_step', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def shareworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob, s_ob, available_actions = env.reset()
else:
if np.all(done):
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send(
(env.observation_space, env.share_observation_space, env.action_space))
elif cmd == 'render_vulnerability':
fr = env.render_vulnerability(data)
remote.send((fr))
elif cmd == 'get_action': # for behavior cloning
action = env.get_action()
remote.send((action))
else:
raise NotImplementedError
class ShareSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[0].recv(
)
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(dones), infos, np.stack(available_actions)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
    def get_action(self):  # for behavior cloning
for remote in self.remotes:
remote.send(('get_action', None))
results = [remote.recv() for remote in self.remotes]
return np.concatenate(results)
# single env
class DummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i] = self.envs[i].reset()
self.actions = None
return obs, rews, dones, infos
def reset(self):
obs = [env.reset() for env in self.envs]
return np.array(obs)
def get_max_step(self):
return [env.max_steps for env in self.envs]
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human", playeridx=None):
if mode == "rgb_array":
if playeridx == None:
return np.array([env.render(mode=mode) for env in self.envs])
else:
return np.array([env.render(mode=mode,playeridx=playeridx) for env in self.envs])
elif mode == "human":
for env in self.envs:
if playeridx == None:
env.render(mode=mode)
else:
env.render(mode=mode, playeridx=playeridx)
else:
raise NotImplementedError
class ShareDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(
env_fns), env.observation_space, env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self):
results = [env.reset() for env in self.envs]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
def save_replay(self):
for env in self.envs:
env.save_replay()
    def get_action(self):  # for behavior cloning
        results = [env.get_action() for env in self.envs]
        return results
| 15,351 | 32.373913 | 118 | py |
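`ShareDummyVecEnv` (and the subprocess variant) expect each wrapped environment to return `(obs, share_obs, available_actions)` from `reset()` and a six-tuple from `step()`. A hedged sketch of that per-env interface — `ToyShareEnv` is a stand-in, not repo code:

```python
# Minimal env satisfying the ShareDummyVecEnv contract (ToyShareEnv is a stand-in).
import gym
import numpy as np

class ToyShareEnv:
    def __init__(self):
        self.observation_space = [gym.spaces.Box(-1, 1, (3,))]
        self.share_observation_space = [gym.spaces.Box(-1, 1, (3,))]
        self.action_space = [gym.spaces.Discrete(2)]
    def reset(self):
        obs = np.zeros((1, 3), dtype=np.float32)
        return obs, obs.copy(), np.ones((1, 2), dtype=np.float32)
    def step(self, action):
        obs = np.zeros((1, 3), dtype=np.float32)
        return obs, obs.copy(), np.zeros((1, 1)), np.array([False]), {}, np.ones((1, 2))
    def close(self):
        pass

venv = ShareDummyVecEnv([lambda: ToyShareEnv() for _ in range(2)])
obs, share_obs, avail = venv.reset()       # each stacked to (2, 1, ...)
obs, share_obs, rews, dones, infos, avail = venv.step(np.zeros((2, 1), dtype=int))
venv.close()
```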
TiKick | TiKick-main/tmarl/envs/__init__.py | | 0 | 0 | 0 | py |
TiKick | TiKick-main/tmarl/envs/football/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/football.py | import numpy as np
import gym
from ray.rllib.env.multi_agent_env import MultiAgentEnv
import tmarl.envs.football.env as football_env
class RllibGFootball(MultiAgentEnv):
"""An example of a wrapper for GFootball to make it compatible with rllib."""
def __init__(self, all_args, rank, log_dir=None, isEval=False):
self.num_agents = all_args.num_agents
self.num_rollout = all_args.n_rollout_threads
self.isEval = isEval
self.rank = rank
# create env
# need_render = (rank == 0) and isEval
need_render = (rank == 0)
# and (not isEval or self.use_behavior_cloning)
self.env = football_env.create_environment(
env_name=all_args.scenario_name, stacked=False,
logdir=log_dir,
representation=all_args.representation,
rewards='scoring' if isEval else all_args.rewards,
write_goal_dumps=False,
write_full_episode_dumps=need_render,
render=need_render,
dump_frequency=1 if need_render else 0,
number_of_left_players_agent_controls=self.num_agents,
number_of_right_players_agent_controls=0,
other_config_options={'action_set':'full'})
# state
self.last_loffside = np.zeros(11)
self.last_roffside = np.zeros(11)
# dimension
self.action_size = 33
if all_args.scenario_name == "11_vs_11_kaggle":
self.avail_size = 20
else:
self.avail_size = 19
if all_args.representation == 'raw':
obs_space_dim = 268
obs_space_low = np.zeros(obs_space_dim) - 1e6
obs_space_high = np.zeros(obs_space_dim) + 1e6
obs_space_type = 'float64'
else:
raise NotImplementedError
self.action_space = [gym.spaces.Discrete(
self.action_size) for _ in range(self.num_agents)]
self.observation_space = [gym.spaces.Box(
low=obs_space_low,
high=obs_space_high,
dtype=obs_space_type) for _ in range(self.num_agents)]
self.share_observation_space = [gym.spaces.Box(
low=obs_space_low,
high=obs_space_high,
dtype=obs_space_type) for _ in range(self.num_agents)]
def reset(self):
# available actions
avail_actions = np.ones([self.num_agents, self.action_size])
avail_actions[:, self.avail_size:] = 0
# state
self.last_loffside = np.zeros(11)
self.last_roffside = np.zeros(11)
# obs
raw_obs = self.env.reset()
raw_obs = self._notFullGame(raw_obs)
obs = self.raw2vec(raw_obs)
share_obs = obs.copy()
return obs, share_obs, avail_actions
def step(self, actions):
# step
actions = np.argmax(actions, axis=-1)
raw_o, r, d, info = self.env.step(actions.astype('int32'))
raw_o = self._notFullGame(raw_o)
obs = self.raw2vec(raw_o)
share_obs = obs.copy()
# available actions
avail_actions = np.ones([self.num_agents, self.action_size])
avail_actions[:, self.avail_size:] = 0
# translate to specific form
rewards = []
infos, dones = [], []
for i in range(self.num_agents):
infos.append(info)
dones.append(d)
reward = r[i] if self.num_agents > 1 else r
reward = -0.01 if d and reward < 1 and not self.isEval else reward
rewards.append(reward)
rewards = np.expand_dims(np.array(rewards), axis=1)
return obs, share_obs, rewards, dones, infos, avail_actions
def seed(self, seed=None):
if seed is None:
np.random.seed(1)
else:
np.random.seed(seed)
def close(self):
self.env.close()
def raw2vec(self, raw_obs):
obs = []
ally = np.array(raw_obs[0]['left_team'])
ally_d = np.array(raw_obs[0]['left_team_direction'])
enemy = np.array(raw_obs[0]['right_team'])
enemy_d = np.array(raw_obs[0]['right_team_direction'])
lo, ro = self.get_offside(raw_obs[0])
for a in range(self.num_agents):
# prepocess
me = ally[int(raw_obs[a]['active'])]
ball = raw_obs[a]['ball'][:2]
ball_dist = np.linalg.norm(me - ball)
enemy_dist = np.linalg.norm(me - enemy, axis=-1)
to_enemy = enemy - me
to_ally = ally - me
to_ball = ball - me
o = []
# shape = 0
o.extend(ally.flatten())
o.extend(ally_d.flatten())
o.extend(enemy.flatten())
o.extend(enemy_d.flatten())
# shape = 88
o.extend(raw_obs[a]['ball'])
o.extend(raw_obs[a]['ball_direction'])
# shape = 94
if raw_obs[a]['ball_owned_team'] == -1:
o.extend([1, 0, 0])
if raw_obs[a]['ball_owned_team'] == 0:
o.extend([0, 1, 0])
if raw_obs[a]['ball_owned_team'] == 1:
o.extend([0, 0, 1])
# shape = 97
active = [0] * 11
active[raw_obs[a]['active']] = 1
o.extend(active)
# shape = 108
game_mode = [0] * 7
game_mode[raw_obs[a]['game_mode']] = 1
o.extend(game_mode)
# shape = 115
o.extend(raw_obs[a]['sticky_actions'][:10])
            # shape = 125
            ball_dist = 1 if ball_dist > 1 else ball_dist
            o.extend([ball_dist])
            # shape = 126
            o.extend(raw_obs[a]['left_team_tired_factor'])
            # shape = 137
            o.extend(raw_obs[a]['left_team_yellow_card'])
            # shape = 148
            o.extend(raw_obs[a]['left_team_active'])  # red cards
            # shape = 159
            o.extend(lo)  # !
            # shape = 170
            o.extend(ro)  # !
            # shape = 181
            o.extend(enemy_dist)
            # shape = 192
            to_ally[:, 0] /= 2
            o.extend(to_ally.flatten())
            # shape = 214
            to_enemy[:, 0] /= 2
            o.extend(to_enemy.flatten())
            # shape = 236
            to_ball[0] /= 2
            o.extend(to_ball.flatten())
            # shape = 238
steps_left = raw_obs[a]['steps_left']
o.extend([1.0 * steps_left / 3001]) # steps left till end
if steps_left > 1500:
steps_left -= 1501 # steps left till halfend
steps_left = 1.0 * min(steps_left, 300.0) # clip
steps_left /= 300.0
o.extend([steps_left])
score_ratio = 1.0 * \
(raw_obs[a]['score'][0] - raw_obs[a]['score'][1])
score_ratio /= 5.0
score_ratio = min(score_ratio, 1.0)
score_ratio = max(-1.0, score_ratio)
o.extend([score_ratio])
# shape = 241
o.extend([0.0] * 27)
# shape = 268
obs.append(o)
return np.array(obs)
def get_offside(self, obs):
ball = np.array(obs['ball'][:2])
ally = np.array(obs['left_team'])
enemy = np.array(obs['right_team'])
if obs['game_mode'] != 0:
self.last_loffside = np.zeros(11, np.float32)
self.last_roffside = np.zeros(11, np.float32)
return np.zeros(11, np.float32), np.zeros(11, np.float32)
need_recalc = False
effective_ownball_team = -1
effective_ownball_player = -1
if obs['ball_owned_team'] > -1:
effective_ownball_team = obs['ball_owned_team']
effective_ownball_player = obs['ball_owned_player']
need_recalc = True
else:
ally_dist = np.linalg.norm(ball - ally, axis=-1)
enemy_dist = np.linalg.norm(ball - enemy, axis=-1)
if np.min(ally_dist) < np.min(enemy_dist):
if np.min(ally_dist) < 0.017:
need_recalc = True
effective_ownball_team = 0
effective_ownball_player = np.argmin(ally_dist)
elif np.min(enemy_dist) < np.min(ally_dist):
if np.min(enemy_dist) < 0.017:
need_recalc = True
effective_ownball_team = 1
effective_ownball_player = np.argmin(enemy_dist)
if not need_recalc:
return self.last_loffside, self.last_roffside
left_offside = np.zeros(11, np.float32)
right_offside = np.zeros(11, np.float32)
if effective_ownball_team == 0:
right_xs = [obs['right_team'][k][0] for k in range(1, 11)]
right_xs = np.array(right_xs)
right_xs.sort()
for k in range(1, 11):
if obs['left_team'][k][0] > right_xs[-1] and k != effective_ownball_player \
and obs['left_team'][k][0] > 0.0:
left_offside[k] = 1.0
else:
left_xs = [obs['left_team'][k][0] for k in range(1, 11)]
left_xs = np.array(left_xs)
left_xs.sort()
for k in range(1, 11):
if obs['right_team'][k][0] < left_xs[0] and k != effective_ownball_player \
and obs['right_team'][k][0] < 0.0:
right_offside[k] = 1.0
self.last_loffside = left_offside
self.last_roffside = right_offside
return left_offside, right_offside
def _notFullGame(self, raw_obs):
# use this function when there are less than 11 players in the scenario
left_ok = len(raw_obs[0]['left_team']) == 11
right_ok = len(raw_obs[0]['right_team']) == 11
if left_ok and right_ok:
return raw_obs
# set player's coordinate at (-1,0), set player's velocity as (0,0)
for obs in raw_obs:
obs['left_team'] = np.array(obs['left_team'])
obs['right_team'] = np.array(obs['right_team'])
obs['left_team_direction'] = np.array(obs['left_team_direction'])
obs['right_team_direction'] = np.array(obs['right_team_direction'])
while len(obs['left_team']) < 11:
obs['left_team'] = np.concatenate([obs['left_team'], np.array([[-1,0]])], axis=0)
obs['left_team_direction'] = np.concatenate([obs['left_team_direction'], np.zeros([1,2])], axis=0)
obs['left_team_tired_factor'] = np.concatenate([obs['left_team_tired_factor'], np.zeros(1)], axis=0)
obs['left_team_yellow_card'] = np.concatenate([obs['left_team_yellow_card'], np.zeros(1)], axis=0)
obs['left_team_active'] = np.concatenate([obs['left_team_active'], np.ones(1)], axis=0)
while len(obs['right_team']) < 11:
obs['right_team'] = np.concatenate([obs['right_team'], np.array([[-1,0]])], axis=0)
obs['right_team_direction'] = np.concatenate([obs['right_team_direction'], np.zeros([1,2])], axis=0)
return raw_obs | 11,164 | 38.038462 | 116 | py |
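`RllibGFootball.step()` argmaxes over the action dimension, so callers pass one-hot (or logit-like) vectors per agent rather than integer ids — matching the `np.eye(...)[actions]` conversion in `FootballDriver.eval()`. A small sketch of that round trip (illustrative values):

```python
# One-hot action conversion used between the driver and the env wrapper.
import numpy as np

action_ids = np.array([3, 7, 12, 0])            # one integer id per agent
one_hot = np.eye(33)[action_ids]                # (num_agents, action_size)
assert (np.argmax(one_hot, axis=-1) == action_ids).all()
```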
TiKick | TiKick-main/tmarl/envs/football/scenarios/11_vs_11_kaggle.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 3000
builder.config().second_half = 1500
builder.config().right_team_difficulty = 1.0
builder.config().left_team_difficulty = 1.0
builder.config().deterministic = False
if builder.EpisodeNumber() % 2 == 0:
first_team = Team.e_Left
second_team = Team.e_Right
else:
first_team = Team.e_Right
second_team = Team.e_Left
builder.SetTeam(first_team)
builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.000000, 0.020000, e_PlayerRole_RM)
builder.AddPlayer(0.000000, -0.020000, e_PlayerRole_CF)
builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
builder.SetTeam(second_team)
builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(-0.050000, 0.000000, e_PlayerRole_RM)
builder.AddPlayer(-0.010000, 0.216102, e_PlayerRole_CF)
builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
| 2,396 | 39.627119 | 77 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/11_vs_11_lazy.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 3000
builder.config().second_half = 1500
builder.config().right_team_difficulty = 1.0
builder.config().left_team_difficulty = 1.0
builder.config().deterministic = False
if builder.EpisodeNumber() % 2 == 0:
first_team = Team.e_Left
second_team = Team.e_Right
else:
first_team = Team.e_Right
second_team = Team.e_Left
builder.SetTeam(first_team)
builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.000000, 0.020000, e_PlayerRole_RM)
builder.AddPlayer(0.000000, -0.020000, e_PlayerRole_CF)
builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
builder.SetTeam(second_team)
builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(-0.050000, 0.000000, e_PlayerRole_RM, lazy=True)
builder.AddPlayer(-0.010000, 0.216102, e_PlayerRole_CF, lazy=True)
builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB, lazy=True)
builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB, lazy=True)
builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB, lazy=True)
builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB, lazy=True)
builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM, lazy=True)
builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM, lazy=True)
builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM, lazy=True)
builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM, lazy=True)
| 2,506 | 41.491525 | 77 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_3_vs_1_with_keeper.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.62, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.6, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.7, 0.2, e_PlayerRole_CM)
builder.AddPlayer(0.7, -0.2, e_PlayerRole_CM)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(-0.75, 0.0, e_PlayerRole_CB)
| 1,324 | 31.317073 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_empty_goal.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.02, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.0, 0.0, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(1.0, 0.0, e_PlayerRole_GK)
| 1,179 | 30.052632 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_run_to_score_with_keeper.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.02, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.0, 0.0, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(0.12, 0.2, e_PlayerRole_LB)
builder.AddPlayer(0.12, 0.1, e_PlayerRole_CB)
builder.AddPlayer(0.12, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.12, -0.1, e_PlayerRole_CB)
builder.AddPlayer(0.12, -0.2, e_PlayerRole_RB)
| 1,422 | 32.093023 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_counterattack_hard.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.26, -0.11)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(-0.672, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.75, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.75, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.672, 0.19576, e_PlayerRole_RB)
builder.AddPlayer(-0.434, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.434, 0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.5, -0.3161, e_PlayerRole_CM)
builder.AddPlayer(0.25, -0.1, e_PlayerRole_LM)
builder.AddPlayer(0.25, 0.1, e_PlayerRole_RM)
builder.AddPlayer(0.35, 0.316102, e_PlayerRole_CF)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(0.128, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.4, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.4, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(0.128, -0.19576, e_PlayerRole_RB)
builder.AddPlayer(0.365, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.282, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.365, 0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.54, -0.3161, e_PlayerRole_LM)
builder.AddPlayer(0.51, 0.0, e_PlayerRole_RM)
builder.AddPlayer(0.54, 0.316102, e_PlayerRole_CF)
| 2,186 | 36.706897 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_run_to_score.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.02, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.0, 0.0, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(0.12, 0.2, e_PlayerRole_LB)
builder.AddPlayer(0.12, 0.1, e_PlayerRole_CB)
builder.AddPlayer(0.12, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.12, -0.1, e_PlayerRole_CB)
builder.AddPlayer(0.12, -0.2, e_PlayerRole_RB)
| 1,421 | 32.069767 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_empty_goal_close.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.77, 0.0)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.75, 0.0, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(1.0, 0.0, e_PlayerRole_GK)
| 1,180 | 30.078947 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_corner.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = False
builder.SetBallPosition(0.99, 0.41)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(1.0, 0.42, e_PlayerRole_LB)
builder.AddPlayer(0.7, 0.15, e_PlayerRole_CB)
builder.AddPlayer(0.7, 0.05, e_PlayerRole_CB)
builder.AddPlayer(0.7, -0.05, e_PlayerRole_RB)
builder.AddPlayer(0.0, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.6, 0.35, e_PlayerRole_CM)
builder.AddPlayer(0.8, 0.07, e_PlayerRole_CM)
builder.AddPlayer(0.8, -0.03, e_PlayerRole_LM)
builder.AddPlayer(0.8, -0.13, e_PlayerRole_RM)
builder.AddPlayer(0.7, -0.3, e_PlayerRole_CF)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(-0.75, -0.18, e_PlayerRole_LB)
builder.AddPlayer(-0.75, -0.08, e_PlayerRole_CB)
builder.AddPlayer(-0.75, 0.02, e_PlayerRole_CB)
builder.AddPlayer(-1.0, -0.1, e_PlayerRole_RB)
builder.AddPlayer(-0.8, -0.25, e_PlayerRole_CM)
builder.AddPlayer(-0.88, -0.07, e_PlayerRole_CM)
builder.AddPlayer(-0.88, 0.03, e_PlayerRole_CM)
builder.AddPlayer(-0.88, 0.13, e_PlayerRole_LM)
builder.AddPlayer(-0.75, 0.25, e_PlayerRole_RM)
builder.AddPlayer(-0.2, 0.0, e_PlayerRole_CF)
| 2,118 | 35.534483 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/__init__.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gfootball_engine as libgame
e_PlayerRole_GK = libgame.e_PlayerRole.e_PlayerRole_GK
e_PlayerRole_CB = libgame.e_PlayerRole.e_PlayerRole_CB
e_PlayerRole_LB = libgame.e_PlayerRole.e_PlayerRole_LB
e_PlayerRole_RB = libgame.e_PlayerRole.e_PlayerRole_RB
e_PlayerRole_DM = libgame.e_PlayerRole.e_PlayerRole_DM
e_PlayerRole_CM = libgame.e_PlayerRole.e_PlayerRole_CM
e_PlayerRole_LM = libgame.e_PlayerRole.e_PlayerRole_LM
e_PlayerRole_RM = libgame.e_PlayerRole.e_PlayerRole_RM
e_PlayerRole_AM = libgame.e_PlayerRole.e_PlayerRole_AM
e_PlayerRole_CF = libgame.e_PlayerRole.e_PlayerRole_CF
Team = libgame.e_Team
| 1,198 | 38.966667 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_pass_and_shoot_with_keeper.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.7, -0.28)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.7, 0.0, e_PlayerRole_CB)
builder.AddPlayer(0.7, -0.3, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(-0.75, 0.3, e_PlayerRole_CB)
| 1,278 | 30.975 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_run_pass_and_shoot_with_keeper.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.7, -0.28)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(0.7, 0.0, e_PlayerRole_CB)
builder.AddPlayer(0.7, -0.3, e_PlayerRole_CB)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(-0.75, 0.1, e_PlayerRole_CB)
| 1,278 | 30.975 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/scenarios/academy_counterattack_easy.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 400
builder.config().deterministic = False
builder.config().offsides = False
builder.config().end_episode_on_score = True
builder.config().end_episode_on_out_of_play = True
builder.config().end_episode_on_possession_change = True
builder.SetBallPosition(0.26, -0.11)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK, controllable=False)
builder.AddPlayer(-0.672, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(-0.75, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.75, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(-0.672, 0.19576, e_PlayerRole_RB)
builder.AddPlayer(-0.434, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(-0.434, 0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.5, -0.3161, e_PlayerRole_CM)
builder.AddPlayer(0.25, -0.1, e_PlayerRole_LM)
builder.AddPlayer(0.25, 0.1, e_PlayerRole_RM)
builder.AddPlayer(0.35, 0.316102, e_PlayerRole_CF)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
builder.AddPlayer(0.128, -0.19576, e_PlayerRole_LB)
builder.AddPlayer(0.4, -0.06356, e_PlayerRole_CB)
builder.AddPlayer(-0.4, 0.063559, e_PlayerRole_CB)
builder.AddPlayer(0.128, -0.19576, e_PlayerRole_RB)
builder.AddPlayer(0.365, -0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.282, 0.0, e_PlayerRole_CM)
builder.AddPlayer(0.365, 0.10568, e_PlayerRole_CM)
builder.AddPlayer(0.54, -0.3161, e_PlayerRole_LM)
builder.AddPlayer(0.51, 0.0, e_PlayerRole_RM)
builder.AddPlayer(0.54, 0.316102, e_PlayerRole_CF)
| 2,185 | 36.689655 | 74 | py |
TiKick | TiKick-main/tmarl/envs/football/env/football_env_core.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Football environment as close as possible to a GYM environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import copy
try:
import gfootball_engine as libgame
from gfootball_engine import GameState
except ImportError:
print('Cannot import gfootball_engine. Package was not installed properly.')
from tmarl.envs.football.env import config as cfg
from gfootball.env import constants
from gfootball.env import football_action_set
from gfootball.env import observation_processor
import numpy as np
import six.moves.cPickle
from six.moves import range
import timeit
_unused_engines = []
_unused_rendering_engine = None
_active_rendering = False
# cv2 is used by render(mode='rgb_array') to convert BGR frames to RGB.
# (The previous try/except re-imported the same module on ImportError,
# which could never succeed; a plain import has identical behavior.)
import cv2
class EnvState(object):
def __init__(self):
self.previous_score_diff = 0
self.previous_game_mode = -1
self.prev_ball_owned_team = -1
class FootballEnvCore(object):
def __init__(self, config):
global _unused_engines
self._config = config
self._sticky_actions = football_action_set.get_sticky_actions(config)
self._use_rendering_engine = False
if _unused_engines:
self._env = _unused_engines.pop()
else:
self._env = self._get_new_env()
# Reset is needed here to make sure render() API call before reset() API
# call works fine (get/setState makes sure env. config is the same).
self.reset(inc=0)
def _get_new_env(self):
env = libgame.GameEnv()
env.game_config.physics_steps_per_frame = self._config[
'physics_steps_per_frame']
env.game_config.render_resolution_x = self._config['render_resolution_x']
env.game_config.render_resolution_y = self._config['render_resolution_y']
return env
def _reset(self, animations, inc):
global _unused_engines
global _unused_rendering_engine
assert (self._env.state == GameState.game_created or
self._env.state == GameState.game_running or
self._env.state == GameState.game_done)
# Variables that are part of the set_state/get_state snapshot.
self._state = EnvState()
# Variables being re-computed upon set_state call, no need to snapshot.
self._observation = None
# Not snapshoted variables.
self._steps_time = 0
self._step = 0
self._config.NewScenario(inc=inc)
if self._env.state == GameState.game_created:
self._env.start_game()
self._env.state = GameState.game_running
scenario_config = self._config.ScenarioConfig()
assert (
not scenario_config.dynamic_player_selection or
not scenario_config.control_all_players
), ('For this scenario you need to control either 0 or all players on the '
'team ({} for left team, {} for right team).').format(
scenario_config.controllable_left_players,
scenario_config.controllable_right_players)
self._env.reset(scenario_config, animations)
def reset(self, inc=1):
"""Reset environment for a new episode using a given config."""
self._episode_start = timeit.default_timer()
self._action_set = football_action_set.get_action_set(self._config)
trace = observation_processor.ObservationProcessor(self._config)
self._cumulative_reward = 0
self._step_count = 0
self._trace = trace
self._reset(self._env.game_config.render, inc=inc)
while not self._retrieve_observation():
self._env.step()
return True
def _rendering_in_use(self):
global _active_rendering
if not self._use_rendering_engine:
assert not _active_rendering, ('Environment does not support multiple '
'rendering instances in the same process.')
_active_rendering = True
self._use_rendering_engine = True
self._env.game_config.render = True
def _release_engine(self):
global _unused_engines
global _unused_rendering_engine
global _active_rendering
if self._env:
if self._use_rendering_engine:
assert not _unused_rendering_engine
_unused_rendering_engine = self._env
_active_rendering = False
else:
_unused_engines.append(self._env)
self._env = None
def close(self):
self._release_engine()
if self._trace:
del self._trace
self._trace = None
def __del__(self):
self.close()
def step(self, action, extra_data={}):
assert self._env.state != GameState.game_done, (
'Cant call step() once episode finished (call reset() instead)')
assert self._env.state == GameState.game_running, (
'reset() must be called before step()')
action = [
football_action_set.named_action_from_action_set(self._action_set, a)
for a in action
]
self._step_count += 1
assert len(action) == (
self._env.config.left_agents + self._env.config.right_agents)
debug = {}
debug['action'] = action
action_index = 0
for left_team in [True, False]:
agents = self._env.config.left_agents if left_team else self._env.config.right_agents
for i in range(agents):
player_action = action[action_index]
# If agent 'holds' the game for too long, just start it.
if self._env.waiting_for_game_count == 20:
player_action = football_action_set.action_short_pass
elif self._env.waiting_for_game_count > 20:
player_action = football_action_set.action_idle
controlled_players = self._observation[
'left_agent_controlled_player'] if left_team else self._observation[
'right_agent_controlled_player']
if self._observation['ball_owned_team'] != -1 and self._observation[
'ball_owned_team'] ^ left_team and controlled_players[
i] == self._observation['ball_owned_player']:
if self._env.waiting_for_game_count < 30:
player_action = football_action_set.action_left
else:
player_action = football_action_set.action_right
action_index += 1
assert isinstance(player_action, football_action_set.CoreAction)
self._env.perform_action(player_action._backend_action, left_team, i)
while True:
enter_time = timeit.default_timer()
self._env.step()
self._steps_time += timeit.default_timer() - enter_time
if self._retrieve_observation():
break
if 'frame' in self._observation:
self._trace.add_frame(self._observation['frame'])
debug['frame_cnt'] = self._step
# Finish the episode on score.
if self._env.config.end_episode_on_score:
if self._observation['score'][0] > 0 or self._observation['score'][1] > 0:
self._env.state = GameState.game_done
# Finish the episode if the game is out of play (e.g. foul, corner etc)
if (self._env.config.end_episode_on_out_of_play and
self._observation['game_mode'] != int(
libgame.e_GameMode.e_GameMode_Normal) and
self._state.previous_game_mode == int(
libgame.e_GameMode.e_GameMode_Normal)):
self._env.state = GameState.game_done
self._state.previous_game_mode = self._observation['game_mode']
# End episode when team possessing the ball changes.
if (self._env.config.end_episode_on_possession_change and
self._observation['ball_owned_team'] != -1 and
self._state.prev_ball_owned_team != -1 and
self._observation['ball_owned_team'] !=
self._state.prev_ball_owned_team):
self._env.state = GameState.game_done
if self._observation['ball_owned_team'] != -1:
self._state.prev_ball_owned_team = self._observation['ball_owned_team']
# Compute reward.
score_diff = self._observation['score'][0] - self._observation['score'][1]
reward = score_diff - self._state.previous_score_diff
self._state.previous_score_diff = score_diff
if reward == 1:
self._trace.write_dump('score')
elif reward == -1:
self._trace.write_dump('lost_score')
debug['reward'] = reward
if self._observation['game_mode'] != int(
libgame.e_GameMode.e_GameMode_Normal):
self._env.waiting_for_game_count += 1
else:
self._env.waiting_for_game_count = 0
if self._step >= self._env.config.game_duration:
self._env.state = GameState.game_done
episode_done = self._env.state == GameState.game_done
debug['time'] = timeit.default_timer()
debug.update(extra_data)
self._cumulative_reward += reward
single_observation = copy.deepcopy(self._observation)
trace = {
'debug': debug,
'observation': single_observation,
'reward': reward,
'cumulative_reward': self._cumulative_reward
}
info = {}
self._trace.update(trace)
dumps = self._trace.process_pending_dumps(episode_done)
if dumps:
info['dumps'] = dumps
if episode_done:
del self._trace
self._trace = None
fps = self._step_count / (debug['time'] - self._episode_start)
game_fps = self._step_count / self._steps_time
logging.info(
'Episode reward: %.2f score: [%d, %d], steps: %d, '
'FPS: %.1f, gameFPS: %.1f', self._cumulative_reward,
single_observation['score'][0], single_observation['score'][1],
self._step_count, fps, game_fps)
    if self._step_count == 1 and self._trace is not None:
      # Register a pending 'episode_done' dump on the first step so that a
      # full-episode trace can be written out once the episode finishes.
      # (The trace is None here if the episode ended on the very first step.)
      self.write_dump('episode_done')
return self._observation, reward, episode_done, info
def _retrieve_observation(self):
"""Constructs observations exposed by the environment.
Returns whether game
is on or not.
"""
info = self._env.get_info()
result = {}
if self._env.game_config.render:
frame = self._env.get_frame()
frame = np.frombuffer(frame, dtype=np.uint8)
frame = np.reshape(frame, [
self._config['render_resolution_x'],
self._config['render_resolution_y'], 3
])
frame = np.reshape(
np.concatenate([frame[:, :, 0], frame[:, :, 1], frame[:, :, 2]]), [
3, self._config['render_resolution_y'],
self._config['render_resolution_x']
])
frame = np.transpose(frame, [1, 2, 0])
frame = np.flip(frame, 0)
result['frame'] = frame
result['ball'] = np.array(
[info.ball_position[0], info.ball_position[1], info.ball_position[2]])
# Ball's movement direction represented as [x, y] distance per step.
result['ball_direction'] = np.array([
info.ball_direction[0], info.ball_direction[1], info.ball_direction[2]
])
# Ball's rotation represented as [x, y, z] rotation angle per step.
result['ball_rotation'] = np.array(
[info.ball_rotation[0], info.ball_rotation[1], info.ball_rotation[2]])
self._convert_players_observation(info.left_team, 'left_team', result)
self._convert_players_observation(info.right_team, 'right_team', result)
result['left_agent_sticky_actions'] = []
result['left_agent_controlled_player'] = []
result['right_agent_sticky_actions'] = []
result['right_agent_controlled_player'] = []
for i in range(self._env.config.left_agents):
result['left_agent_controlled_player'].append(
info.left_controllers[i].controlled_player)
result['left_agent_sticky_actions'].append(
np.array(self.sticky_actions_state(True, i), dtype=np.uint8))
for i in range(self._env.config.right_agents):
result['right_agent_controlled_player'].append(
info.right_controllers[i].controlled_player)
result['right_agent_sticky_actions'].append(
np.array(self.sticky_actions_state(False, i), dtype=np.uint8))
result['game_mode'] = int(info.game_mode)
result['score'] = [info.left_goals, info.right_goals]
result['ball_owned_team'] = info.ball_owned_team
result['ball_owned_player'] = info.ball_owned_player
result['steps_left'] = self._env.config.game_duration - info.step
self._observation = result
self._step = info.step
return info.is_in_play
def _convert_players_observation(self, players, name, result):
"""Converts internal players representation to the public one.
Internal representation comes directly from gameplayfootball engine.
Public representation is part of environment observations.
Args:
players: collection of team players to convert.
name: name of the team being converted (left_team or right_team).
result: collection where conversion result is added.
"""
positions = []
directions = []
tired_factors = []
active = []
yellow_cards = []
roles = []
designated_player = -1
for id, player in enumerate(players):
positions.append(player.position[0])
positions.append(player.position[1])
directions.append(player.direction[0])
directions.append(player.direction[1])
tired_factors.append(player.tired_factor)
active.append(player.is_active)
yellow_cards.append(player.has_card)
roles.append(player.role)
if player.designated_player:
designated_player = id
result[name] = np.reshape(np.array(positions), [-1, 2])
# Players' movement direction represented as [x, y] distance per step.
result['{}_direction'.format(name)] = np.reshape(
np.array(directions), [-1, 2])
# Players' tired factor in the range [0, 1] (0 means not tired).
result['{}_tired_factor'.format(name)] = np.array(tired_factors)
result['{}_active'.format(name)] = np.array(active)
result['{}_yellow_card'.format(name)] = np.array(yellow_cards)
result['{}_roles'.format(name)] = np.array(roles)
result['{}_designated_player'.format(name)] = designated_player
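  # Shape summary (illustrative, for an 11-player team): result['left_team']
  # is a [11, 2] array of (x, y) positions, '{}_direction' is [11, 2], while
  # '{}_tired_factor', '{}_active', '{}_yellow_card' and '{}_roles' are
  # length-11 vectors and '{}_designated_player' is a single player index.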
def observation(self):
"""Returns the current observation of the game."""
assert (self._env.state == GameState.game_running or
self._env.state == GameState.game_done), (
'reset() must be called before observation()')
return copy.deepcopy(self._observation)
def sticky_actions_state(self, left_team, player_id):
result = []
for a in self._sticky_actions:
result.append(
self._env.sticky_action_state(a._backend_action, left_team,
player_id))
return np.uint8(result)
def get_state(self, to_pickle):
assert (self._env.state == GameState.game_running or
self._env.state == GameState.game_done), (
'reset() must be called before get_state()')
to_pickle['FootballEnvCore'] = self._state
pickle = six.moves.cPickle.dumps(to_pickle)
return self._env.get_state(pickle)
def set_state(self, state):
assert (self._env.state == GameState.game_running or
self._env.state == GameState.game_done), (
'reset() must be called before set_state()')
res = self._env.set_state(state)
assert self._retrieve_observation()
    from_pickle = six.moves.cPickle.loads(res)
    self._state = from_pickle['FootballEnvCore']
    if self._trace is None:
      self._trace = observation_processor.ObservationProcessor(self._config)
    return from_pickle
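  # Illustrative snapshot/rewind round-trip (assumes 'core' is a reset
  # FootballEnvCore instance controlling a single player):
  #   snapshot = core.get_state({})
  #   core.step([football_action_set.action_idle])
  #   core.set_state(snapshot)  # rewinds the engine to the captured state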
def tracker_setup(self, start, end):
self._env.tracker_setup(start, end)
def write_dump(self, name):
return self._trace.write_dump(name)
def render(self, mode):
global _unused_rendering_engine
if self._env.state == GameState.game_created:
self._rendering_in_use()
return False
if not self._env.game_config.render:
if not self._use_rendering_engine:
if self._env.state != GameState.game_created:
state = self.get_state({})
self._release_engine()
if _unused_rendering_engine:
self._env = _unused_rendering_engine
_unused_rendering_engine = None
else:
self._env = self._get_new_env()
self._rendering_in_use()
self._reset(animations=False, inc=0)
self.set_state(state)
# We call render twice, as the first call has bad camera position.
self._env.render(False)
else:
self._env.game_config.render = True
self._env.render(True)
self._retrieve_observation()
if mode == 'rgb_array':
frame = self._observation['frame']
b, g, r = cv2.split(frame)
return cv2.merge((r, g, b))
elif mode == 'human':
return True
return False
def disable_render(self):
self._env.game_config.render = False
| 17,067 | 37.269058 | 91 | py |
TiKick | TiKick-main/tmarl/envs/football/env/script_helpers.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of functions used by command line scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tmarl.envs.football.env import config
from gfootball.env import football_action_set
from tmarl.envs.football.env import football_env
from gfootball.env import observation_processor
import copy
import six.moves.cPickle
import os
import tempfile
class ScriptHelpers(object):
"""Set of methods used by command line scripts."""
def __init__(self):
pass
def __modify_trace(self, replay, fps):
"""Adopt replay to the new framerate and add additional steps at the end."""
trace = []
min_fps = replay[0]['debug']['config']['physics_steps_per_frame']
assert fps % min_fps == 0, (
'Trace has to be rendered in framerate being multiple of {}'.format(
min_fps))
assert fps <= 100, ('Framerate of up to 100 is supported')
empty_steps = int(fps / min_fps) - 1
for f in replay:
trace.append(f)
idle_step = copy.deepcopy(f)
idle_step['debug']['action'] = [football_action_set.action_idle
] * len(f['debug']['action'])
for _ in range(empty_steps):
trace.append(idle_step)
# Add some empty steps at the end, so that we can record videos.
for _ in range(10):
trace.append(idle_step)
return trace
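  # Framerate math (illustrative): the trace's native rate is read from
  # physics_steps_per_frame (10 by default, i.e. 10 env frames per second),
  # so replaying at fps=50 inserts int(50 / 10) - 1 = 4 idle steps after each
  # recorded step; 10 extra idle steps are appended for video recording.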
def __build_players(self, dump_file, spec):
players = []
for player in spec:
players.extend(['replay:path={},left_players=1'.format(
dump_file)] * config.count_left_players(player))
players.extend(['replay:path={},right_players=1'.format(
dump_file)] * config.count_right_players(player))
return players
def load_dump(self, dump_file):
dump = []
with open(dump_file, 'rb') as in_fd:
while True:
try:
step = six.moves.cPickle.load(in_fd)
except EOFError:
return dump
dump.append(step)
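  # A dump file is a sequence of cPickle'd per-step dictionaries with keys
  # 'debug', 'observation', 'reward' and 'cumulative_reward'. Illustrative
  # inspection (the path below is a placeholder, not a real dump):
  #   steps = ScriptHelpers().load_dump('/tmp/dumps/episode_done.dump')
  #   print(steps[0]['observation']['score'], steps[0]['debug']['action'])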
  def dump_to_txt(self, dump_file, output, include_debug):
    dump = self.load_dump(dump_file)
    if not include_debug:
      for s in dump:
        if 'debug' in s:
          del s['debug']
    with open(output, 'w') as out_fd:
      out_fd.write(str(dump))
def dump_to_video(self, dump_file):
dump = self.load_dump(dump_file)
cfg = config.Config(dump[0]['debug']['config'])
cfg['dump_full_episodes'] = True
cfg['write_video'] = True
cfg['display_game_stats'] = True
processor = observation_processor.ObservationProcessor(cfg)
processor.write_dump('episode_done')
for frame in dump:
processor.update(frame)
def replay(self, dump, fps=10, config_update={}, directory=None, render=True):
replay = self.load_dump(dump)
trace = self.__modify_trace(replay, fps)
fd, temp_path = tempfile.mkstemp(suffix='.dump')
with open(temp_path, 'wb') as f:
for step in trace:
six.moves.cPickle.dump(step, f)
    assert replay[0]['debug']['frame_cnt'] == 0, (
        'Trace does not start from the beginning of the episode, cannot replay')
cfg = config.Config(replay[0]['debug']['config'])
cfg['players'] = self.__build_players(temp_path, cfg['players'])
config_update['physics_steps_per_frame'] = int(100 / fps)
config_update['real_time'] = False
if directory:
config_update['tracesdir'] = directory
config_update['write_video'] = True
    # Optional overrides for replay rendering (disabled by default):
    # config_update['display_game_stats'] = False
    # config_update['video_quality_level'] = 2  # 0 - low, 1 - medium, 2 - high
cfg.update(config_update)
env = football_env.FootballEnv(cfg)
if render:
env.render()
env.reset()
done = False
try:
while not done:
_, _, done, _ = env.step([])
except KeyboardInterrupt:
env.write_dump('shutdown')
exit(1)
os.close(fd)
| 4,449 | 32.712121 | 81 | py |
TiKick | TiKick-main/tmarl/envs/football/env/scenario_builder.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class responsible for generating scenarios."""
import importlib
import os
import pkgutil
import random
import sys
from absl import flags
from absl import logging
import gfootball_engine as libgame
Player = libgame.FormationEntry
Role = libgame.e_PlayerRole
Team = libgame.e_Team
FLAGS = flags.FLAGS
def all_scenarios():
path = os.path.abspath(__file__)
path = os.path.join(os.path.dirname(os.path.dirname(path)), 'scenarios')
scenarios = []
for m in pkgutil.iter_modules([path]):
# There was API change in pkgutil between Python 3.5 and 3.6...
if m.__class__ == tuple:
scenarios.append(m[1])
else:
scenarios.append(m.name)
return scenarios
class Scenario(object):
def __init__(self, config):
# Game config controls C++ engine and is derived from the main config.
self._scenario_cfg = libgame.ScenarioConfig.make()
self._config = config
self._active_team = Team.e_Left
scenario = None
try:
scenario = importlib.import_module('tmarl.envs.football.scenarios.{}'.format(config['level']))
except ImportError as e:
      logging.error('Loading scenario "%s" failed', config['level'])
logging.error(e)
sys.exit(1)
scenario.build_scenario(self)
self.SetTeam(libgame.e_Team.e_Left)
self._FakePlayersForEmptyTeam(self._scenario_cfg.left_team)
self.SetTeam(libgame.e_Team.e_Right)
self._FakePlayersForEmptyTeam(self._scenario_cfg.right_team)
self._BuildScenarioConfig()
def _FakePlayersForEmptyTeam(self, team):
if len(team) == 0:
self.AddPlayer(-1.000000, 0.420000, libgame.e_PlayerRole.e_PlayerRole_GK, True)
def _BuildScenarioConfig(self):
"""Builds scenario config from gfootball.environment config."""
self._scenario_cfg.real_time = self._config['real_time']
self._scenario_cfg.left_agents = self._config.number_of_left_players()
self._scenario_cfg.right_agents = self._config.number_of_right_players()
# This is needed to record 'game_engine_random_seed' in the dump.
if 'game_engine_random_seed' not in self._config._values:
self._config.set_scenario_value('game_engine_random_seed',
random.randint(0, 2000000000))
if not self._scenario_cfg.deterministic:
self._scenario_cfg.game_engine_random_seed = (
self._config['game_engine_random_seed'])
if 'reverse_team_processing' not in self._config:
self._config['reverse_team_processing'] = (
bool(self._config['game_engine_random_seed'] % 2))
if 'reverse_team_processing' in self._config:
self._scenario_cfg.reverse_team_processing = (
self._config['reverse_team_processing'])
def config(self):
return self._scenario_cfg
def SetTeam(self, team):
self._active_team = team
def AddPlayer(self, x, y, role, lazy=False, controllable=True):
"""Build player for the current scenario.
Args:
x: x coordinate of the player in the range [-1, 1].
y: y coordinate of the player in the range [-0.42, 0.42].
role: Player's role in the game (goal keeper etc.).
lazy: Computer doesn't perform any automatic actions for lazy player.
controllable: Whether player can be controlled.
"""
player = Player(x, y, role, lazy, controllable)
if self._active_team == Team.e_Left:
self._scenario_cfg.left_team.append(player)
else:
self._scenario_cfg.right_team.append(player)
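  # Illustrative use from a scenario's build_scenario(builder) function (role
  # constants as exported by tmarl.envs.football.scenarios):
  #   builder.SetTeam(Team.e_Left)
  #   builder.AddPlayer(-1.0, 0.0, Role.e_PlayerRole_GK, controllable=False)
  #   builder.AddPlayer(-0.5, 0.0, Role.e_PlayerRole_CB, lazy=True)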
def SetBallPosition(self, ball_x, ball_y):
self._scenario_cfg.ball_position[0] = ball_x
self._scenario_cfg.ball_position[1] = ball_y
def EpisodeNumber(self):
return self._config['episode_number']
def ScenarioConfig(self):
return self._scenario_cfg
| 4,305 | 33.725806 | 100 | py |
TiKick | TiKick-main/tmarl/envs/football/env/config.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config loader."""
from __future__ import print_function
import copy
from absl import flags
import gfootball_engine as libgame
FLAGS = flags.FLAGS
def parse_player_definition(definition):
"""Parses player definition.
An example of player definition is: "agent:players=4" or "replay:path=...".
Args:
definition: a string defining a player
Returns:
A tuple (name, dict).
"""
name = definition
d = {'left_players': 0,
'right_players': 0}
if ':' in definition:
(name, params) = definition.split(':')
for param in params.split(','):
(key, value) = param.split('=')
d[key] = value
if d['left_players'] == 0 and d['right_players'] == 0:
d['left_players'] = 1
return name, d
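# Examples (illustrative):
#   parse_player_definition('keyboard:left_players=1')
#     -> ('keyboard', {'left_players': '1', 'right_players': 0})
#   parse_player_definition('bot')
#     -> ('bot', {'left_players': 1, 'right_players': 0})
# Values parsed from the string stay strings; the count_* helpers below cast
# them with int().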
def count_players(definition):
"""Returns a number of players given a definition."""
_, player_definition = parse_player_definition(definition)
return (int(player_definition['left_players']) +
int(player_definition['right_players']))
def count_left_players(definition):
"""Returns a number of left players given a definition."""
return int(parse_player_definition(definition)[1]['left_players'])
def count_right_players(definition):
  """Returns a number of right players given a definition."""
  return int(parse_player_definition(definition)[1]['right_players'])
def get_agent_number_of_players(players):
"""Returns a total number of players controlled by an agent."""
return sum([count_players(player) for player in players
if player.startswith('agent')])
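# Example (illustrative): only 'agent' players are counted.
#   get_agent_number_of_players(['agent:left_players=2', 'bot:right_players=1'])
#     -> 2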
class Config(object):
def __init__(self, values=None):
self._values = {
'action_set': 'default',
'custom_display_stats': None,
'display_game_stats': True,
'dump_full_episodes': False,
'dump_scores': False,
'players': ['agent:left_players=1'],
'level': '11_vs_11_stochastic',
'physics_steps_per_frame': 10,
'render_resolution_x': 1280,
'real_time': False,
'tracesdir': '/tmp/dumps',
'video_format': 'avi',
'video_quality_level': 0, # 0 - low, 1 - medium, 2 - high
'write_video': False
}
self._values['render_resolution_y'] = int(
0.5625 * self._values['render_resolution_x'])
if values:
self._values.update(values)
self.NewScenario()
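  # Illustrative construction with overrides (keys must match the defaults
  # above; the chosen level is just an example):
  #   cfg = Config({'level': 'academy_empty_goal_close', 'write_video': True})
  #   cfg['render_resolution_y']  # -> 720, derived from render_resolution_x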
def number_of_left_players(self):
return sum([count_left_players(player)
for player in self._values['players']])
def number_of_right_players(self):
return sum([count_right_players(player)
for player in self._values['players']])
def number_of_players_agent_controls(self):
return get_agent_number_of_players(self._values['players'])
def __eq__(self, other):
assert isinstance(other, self.__class__)
return self._values == other._values and self._scenario_values == other._scenario_values
def __ne__(self, other):
return not self.__eq__(other)
def __getitem__(self, key):
if key in self._scenario_values:
return self._scenario_values[key]
return self._values[key]
def __setitem__(self, key, value):
self._values[key] = value
def __contains__(self, key):
return key in self._scenario_values or key in self._values
def get_dictionary(self):
cfg = copy.deepcopy(self._values)
cfg.update(self._scenario_values)
return cfg
def set_scenario_value(self, key, value):
"""Override value of specific config key for a single episode."""
self._scenario_values[key] = value
def serialize(self):
return self._values
def update(self, config):
self._values.update(config)
def ScenarioConfig(self):
return self._scenario_cfg
  def NewScenario(self, inc=1):
if 'episode_number' not in self._values:
self._values['episode_number'] = 0
self._values['episode_number'] += inc
self._scenario_values = {}
from tmarl.envs.football.env import scenario_builder
self._scenario_cfg = scenario_builder.Scenario(self).ScenarioConfig()
| 4,590 | 28.811688 | 92 | py |
TiKick | TiKick-main/tmarl/envs/football/env/__init__.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GFootball Environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tmarl.envs.football.env import config
from gfootball.env import football_env
from gfootball.env import observation_preprocessing
from gfootball.env import wrappers
def _process_reward_wrappers(env, rewards):
assert 'scoring' in rewards.split(',')
if 'checkpoints' in rewards.split(','):
env = wrappers.CheckpointRewardWrapper(env)
return env
def _process_representation_wrappers(env, representation, channel_dimensions):
"""Wraps with necessary representation wrappers.
Args:
env: A GFootball gym environment.
representation: See create_environment.representation comment.
channel_dimensions: (width, height) tuple that represents the dimensions of
SMM or pixels representation.
Returns:
Google Research Football environment.
"""
if representation.startswith('pixels'):
env = wrappers.PixelsStateWrapper(env, 'gray' in representation,
channel_dimensions)
elif representation == 'simple115':
env = wrappers.Simple115StateWrapper(env)
elif representation == 'simple115v2':
env = wrappers.Simple115StateWrapper(env, True)
elif representation == 'extracted':
env = wrappers.SMMWrapper(env, channel_dimensions)
elif representation == 'raw':
pass
else:
raise ValueError('Unsupported representation: {}'.format(representation))
return env
def _apply_output_wrappers(env, rewards, representation, channel_dimensions,
apply_single_agent_wrappers, stacked):
"""Wraps with necessary wrappers modifying the output of the environment.
Args:
env: A GFootball gym environment.
rewards: What rewards to apply.
representation: See create_environment.representation comment.
channel_dimensions: (width, height) tuple that represents the dimensions of
SMM or pixels representation.
apply_single_agent_wrappers: Whether to reduce output to single agent case.
stacked: Should observations be stacked.
Returns:
Google Research Football environment.
"""
env = _process_reward_wrappers(env, rewards)
env = _process_representation_wrappers(env, representation,
channel_dimensions)
if apply_single_agent_wrappers:
if representation != 'raw':
env = wrappers.SingleAgentObservationWrapper(env)
env = wrappers.SingleAgentRewardWrapper(env)
if stacked:
env = wrappers.FrameStack(env, 4)
env = wrappers.GetStateWrapper(env)
return env
def create_environment(env_name='',
stacked=False,
representation='extracted',
rewards='scoring',
write_goal_dumps=False,
write_full_episode_dumps=False,
render=False,
write_video=False,
dump_frequency=1,
logdir='',
extra_players=None,
number_of_left_players_agent_controls=1,
number_of_right_players_agent_controls=0,
channel_dimensions=(
observation_preprocessing.SMM_WIDTH,
observation_preprocessing.SMM_HEIGHT),
other_config_options={}):
"""Creates a Google Research Football environment.
Args:
env_name: a name of a scenario to run, e.g. "11_vs_11_stochastic".
The list of scenarios can be found in directory "scenarios".
stacked: If True, stack 4 observations, otherwise, only the last
observation is returned by the environment.
Stacking is only possible when representation is one of the following:
"pixels", "pixels_gray" or "extracted".
In that case, the stacking is done along the last (i.e. channel)
dimension.
representation: String to define the representation used to build
the observation. It can be one of the following:
'pixels': the observation is the rendered view of the football field
downsampled to 'channel_dimensions'. The observation size is:
'channel_dimensions'x3 (or 'channel_dimensions'x12 when "stacked" is
True).
'pixels_gray': the observation is the rendered view of the football field
in gray scale and downsampled to 'channel_dimensions'. The observation
size is 'channel_dimensions'x1 (or 'channel_dimensions'x4 when stacked
is True).
'extracted': also referred to as super minimap. The observation is
composed of 4 planes of size 'channel_dimensions'.
Its size is then 'channel_dimensions'x4 (or 'channel_dimensions'x16 when
stacked is True).
The first plane P holds the position of players on the left
team, P[y,x] is 255 if there is a player at position (x,y), otherwise,
its value is 0.
The second plane holds in the same way the position of players
on the right team.
The third plane holds the position of the ball.
The last plane holds the active player.
'simple115'/'simple115v2': the observation is a vector of size 115.
It holds:
- the ball_position and the ball_direction as (x,y,z)
- one hot encoding of who controls the ball.
[1, 0, 0]: nobody, [0, 1, 0]: left team, [0, 0, 1]: right team.
- one hot encoding of size 11 to indicate who is the active player
in the left team.
- 11 (x,y) positions for each player of the left team.
- 11 (x,y) motion vectors for each player of the left team.
- 11 (x,y) positions for each player of the right team.
- 11 (x,y) motion vectors for each player of the right team.
- one hot encoding of the game mode. Vector of size 7 with the
following meaning:
{NormalMode, KickOffMode, GoalKickMode, FreeKickMode,
CornerMode, ThrowInMode, PenaltyMode}.
Can only be used when the scenario is a flavor of normal game
(i.e. 11 versus 11 players).
rewards: Comma separated list of rewards to be added.
Currently supported rewards are 'scoring' and 'checkpoints'.
write_goal_dumps: whether to dump traces up to 200 frames before goals.
write_full_episode_dumps: whether to dump traces for every episode.
render: whether to render game frames.
Must be enable when rendering videos or when using pixels
representation.
write_video: whether to dump videos when a trace is dumped.
dump_frequency: how often to write dumps/videos (in terms of # of episodes)
Sub-sample the episodes for which we dump videos to save some disk space.
logdir: directory holding the logs.
extra_players: A list of extra players to use in the environment.
Each player is defined by a string like:
"$player_name:left_players=?,right_players=?,$param1=?,$param2=?...."
number_of_left_players_agent_controls: Number of left players an agent
controls.
number_of_right_players_agent_controls: Number of right players an agent
controls.
channel_dimensions: (width, height) tuple that represents the dimensions of
SMM or pixels representation.
other_config_options: dict that allows directly setting other options in
the Config
Returns:
Google Research Football environment.
"""
assert env_name
scenario_config = config.Config({'level': env_name}).ScenarioConfig()
players = [('agent:left_players=%d,right_players=%d' % (
number_of_left_players_agent_controls,
number_of_right_players_agent_controls))]
# Enable MultiAgentToSingleAgent wrapper?
multiagent_to_singleagent = False
if scenario_config.control_all_players:
if (number_of_left_players_agent_controls in [0, 1] and
number_of_right_players_agent_controls in [0, 1]):
multiagent_to_singleagent = True
players = [('agent:left_players=%d,right_players=%d' %
(scenario_config.controllable_left_players
if number_of_left_players_agent_controls else 0,
scenario_config.controllable_right_players
if number_of_right_players_agent_controls else 0))]
if extra_players is not None:
players.extend(extra_players)
config_values = {
'dump_full_episodes': write_full_episode_dumps,
'dump_scores': write_goal_dumps,
'players': players,
'level': env_name,
'tracesdir': logdir,
'write_video': write_video,
}
config_values.update(other_config_options)
c = config.Config(config_values)
env = football_env.FootballEnv(c)
if multiagent_to_singleagent:
env = wrappers.MultiAgentToSingleAgent(
env, number_of_left_players_agent_controls,
number_of_right_players_agent_controls)
if dump_frequency > 1:
env = wrappers.PeriodicDumpWriter(env, dump_frequency, render)
elif render:
env.render()
env = _apply_output_wrappers(
env, rewards, representation, channel_dimensions,
(number_of_left_players_agent_controls +
number_of_right_players_agent_controls == 1), stacked)
return env
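# Illustrative usage (the argument values here are assumptions for
# demonstration, not required defaults):
#   env = create_environment(
#       env_name='academy_empty_goal_close',
#       representation='extracted',
#       rewards='scoring,checkpoints',
#       number_of_left_players_agent_controls=1)
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())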
def create_remote_environment(
username,
token,
model_name='',
track='',
stacked=False,
representation='raw',
rewards='scoring',
channel_dimensions=(
observation_preprocessing.SMM_WIDTH,
observation_preprocessing.SMM_HEIGHT),
include_rendering=False):
"""Creates a remote Google Research Football environment.
Args:
username: User name.
token: User token.
model_name: A model identifier to be displayed on the leaderboard.
track: which competition track to connect to.
stacked: If True, stack 4 observations, otherwise, only the last
observation is returned by the environment.
Stacking is only possible when representation is one of the following:
"pixels", "pixels_gray" or "extracted".
In that case, the stacking is done along the last (i.e. channel)
dimension.
representation: See create_environment.representation comment.
rewards: Comma separated list of rewards to be added.
Currently supported rewards are 'scoring' and 'checkpoints'.
channel_dimensions: (width, height) tuple that represents the dimensions of
SMM or pixels representation.
include_rendering: Whether to return frame as part of the output.
Returns:
Google Research Football environment.
"""
from gfootball.env import remote_football_env
env = remote_football_env.RemoteFootballEnv(
username, token, model_name=model_name, track=track,
include_rendering=include_rendering)
env = _apply_output_wrappers(
env, rewards, representation, channel_dimensions,
env._config.number_of_players_agent_controls() == 1, stacked)
return env
| 11,541 | 41.748148 | 80 | py |
TiKick | TiKick-main/tmarl/envs/football/env/football_env.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allows different types of players to play against each other."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import importlib
from absl import logging
from tmarl.envs.football.env import config as cfg
from gfootball.env import constants
from gfootball.env import football_action_set
from tmarl.envs.football.env import football_env_core
from gfootball.env import observation_rotation
import gym
import numpy as np
class FootballEnv(gym.Env):
"""Allows multiple players to play in the same environment."""
def __init__(self, config):
self._config = config
player_config = {'index': 0}
# There can be at most one agent at a time. We need to remember its
# team and the index on the team to generate observations appropriately.
self._agent = None
self._agent_index = -1
self._agent_left_position = -1
self._agent_right_position = -1
self._players = self._construct_players(config['players'], player_config)
self._env = football_env_core.FootballEnvCore(self._config)
self._num_actions = len(football_action_set.get_action_set(self._config))
self._cached_observation = None
@property
def action_space(self):
if self._config.number_of_players_agent_controls() > 1:
return gym.spaces.MultiDiscrete(
[self._num_actions] * self._config.number_of_players_agent_controls())
return gym.spaces.Discrete(self._num_actions)
def _construct_players(self, definitions, config):
result = []
left_position = 0
right_position = 0
for definition in definitions:
(name, d) = cfg.parse_player_definition(definition)
config_name = 'player_{}'.format(name)
if config_name in config:
config[config_name] += 1
else:
config[config_name] = 0
try:
player_factory = importlib.import_module(
'gfootball.env.players.{}'.format(name))
except ImportError as e:
logging.error('Failed loading player "%s"', name)
logging.error(e)
exit(1)
player_config = copy.deepcopy(config)
player_config.update(d)
player = player_factory.Player(player_config, self._config)
if name == 'agent':
assert not self._agent, 'Only one \'agent\' player allowed'
self._agent = player
self._agent_index = len(result)
self._agent_left_position = left_position
self._agent_right_position = right_position
result.append(player)
left_position += player.num_controlled_left_players()
right_position += player.num_controlled_right_players()
config['index'] += 1
return result
def _convert_observations(self, original, player,
left_player_position, right_player_position):
"""Converts generic observations returned by the environment to
the player specific observations.
Args:
original: original observations from the environment.
player: player for which to generate observations.
left_player_position: index into observation corresponding to the left
player.
right_player_position: index into observation corresponding to the right
player.
"""
observations = []
for is_left in [True, False]:
adopted = original if is_left or player.can_play_right(
) else observation_rotation.flip_observation(original, self._config)
prefix = 'left' if is_left or not player.can_play_right() else 'right'
position = left_player_position if is_left else right_player_position
for x in range(player.num_controlled_left_players() if is_left
else player.num_controlled_right_players()):
o = {}
for v in constants.EXPOSED_OBSERVATIONS:
o[v] = copy.deepcopy(adopted[v])
assert (len(adopted[prefix + '_agent_controlled_player']) == len(
adopted[prefix + '_agent_sticky_actions']))
o['designated'] = adopted[prefix + '_team_designated_player']
if position + x >= len(adopted[prefix + '_agent_controlled_player']):
o['active'] = -1
o['sticky_actions'] = []
else:
o['active'] = (
adopted[prefix + '_agent_controlled_player'][position + x])
o['sticky_actions'] = np.array(copy.deepcopy(
adopted[prefix + '_agent_sticky_actions'][position + x]))
# There is no frame for players on the right ATM.
if is_left and 'frame' in original:
o['frame'] = original['frame']
observations.append(o)
return observations
def _action_to_list(self, a):
if isinstance(a, np.ndarray):
return a.tolist()
if not isinstance(a, list):
return [a]
return a
def _get_actions(self):
obs = self._env.observation()
left_actions = []
right_actions = []
left_player_position = 0
right_player_position = 0
for player in self._players:
adopted_obs = self._convert_observations(obs, player,
left_player_position,
right_player_position)
left_player_position += player.num_controlled_left_players()
right_player_position += player.num_controlled_right_players()
a = self._action_to_list(player.take_action(adopted_obs))
assert len(adopted_obs) == len(
a), 'Player provided {} actions instead of {}.'.format(
len(a), len(adopted_obs))
if not player.can_play_right():
for x in range(player.num_controlled_right_players()):
index = x + player.num_controlled_left_players()
a[index] = observation_rotation.flip_single_action(
a[index], self._config)
left_actions.extend(a[:player.num_controlled_left_players()])
right_actions.extend(a[player.num_controlled_left_players():])
actions = left_actions + right_actions
return actions
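    # Actions are assembled left team first, then right team, which is the
    # ordering the core environment expects; right-side players driven by
    # left-only policies have already had their actions mirrored above.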
def step(self, action):
action = self._action_to_list(action)
if self._agent:
self._agent.set_action(action)
else:
assert len(
action
) == 0, 'step() received {} actions, but no agent is playing.'.format(
len(action))
_, reward, done, info = self._env.step(self._get_actions())
score_reward = reward
if self._agent:
reward = ([reward] * self._agent.num_controlled_left_players() +
[-reward] * self._agent.num_controlled_right_players())
self._cached_observation = None
info['score_reward'] = score_reward
return (self.observation(), np.array(reward, dtype=np.float32), done, info)
def reset(self):
self._env.reset()
for player in self._players:
player.reset()
self._cached_observation = None
return self.observation()
def observation(self):
if not self._cached_observation:
self._cached_observation = self._env.observation()
if self._agent:
self._cached_observation = self._convert_observations(
self._cached_observation, self._agent,
self._agent_left_position, self._agent_right_position)
return self._cached_observation
def write_dump(self, name):
return self._env.write_dump(name)
def close(self):
self._env.close()
def get_state(self, to_pickle={}):
return self._env.get_state(to_pickle)
def set_state(self, state):
self._cached_observation = None
return self._env.set_state(state)
def tracker_setup(self, start, end):
self._env.tracker_setup(start, end)
def render(self, mode='human'):
self._cached_observation = None
return self._env.render(mode=mode)
def disable_render(self):
self._cached_observation = None
return self._env.disable_render()
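# Minimal usage sketch (illustrative; assumes a scenario config built via
# cfg.Config, which is defined elsewhere in this package):
#   config = cfg.Config({'level': '11_vs_11_stochastic',
#                        'players': ['agent:left_players=1']})
#   env = FootballEnv(config)
#   obs = env.reset()
#   obs, reward, done, info = env.step([0])  # one action per controlled player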
| 8,348 | 36.272321 | 80 | py |
TiKick | TiKick-main/tmarl/algorithms/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_algorithm.py | import torch
from tmarl.utils.valuenorm import ValueNorm
# MAPPO algorithm class: the PPO training loss would be implemented here.
class MAPPOAlgorithm():
def __init__(self,
args,
init_module,
device=torch.device("cpu")):
self.device = device
self.tpdv = dict(dtype=torch.float32, device=device)
self.algo_module = init_module
self.clip_param = args.clip_param
self.ppo_epoch = args.ppo_epoch
self.num_mini_batch = args.num_mini_batch
self.data_chunk_length = args.data_chunk_length
self.policy_value_loss_coef = args.policy_value_loss_coef
self.value_loss_coef = args.value_loss_coef
self.entropy_coef = args.entropy_coef
self.max_grad_norm = args.max_grad_norm
self.huber_delta = args.huber_delta
self._use_recurrent_policy = args.use_recurrent_policy
self._use_naive_recurrent = args.use_naive_recurrent_policy
self._use_max_grad_norm = args.use_max_grad_norm
self._use_clipped_value_loss = args.use_clipped_value_loss
self._use_huber_loss = args.use_huber_loss
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_value_active_masks = args.use_value_active_masks
self._use_policy_active_masks = args.use_policy_active_masks
self._use_policy_vhead = args.use_policy_vhead
assert (self._use_popart and self._use_valuenorm) == False, ("self._use_popart and self._use_valuenorm can not be set True simultaneously")
if self._use_popart:
self.value_normalizer = self.algo_module.critic.v_out
if self._use_policy_vhead:
self.policy_value_normalizer = self.algo_module.actor.v_out
elif self._use_valuenorm:
self.value_normalizer = ValueNorm(1, device = self.device)
if self._use_policy_vhead:
self.policy_value_normalizer = ValueNorm(1, device = self.device)
else:
self.value_normalizer = None
if self._use_policy_vhead:
self.policy_value_normalizer = None
def prep_rollout(self):
self.algo_module.actor.eval()
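    # Training code is not part of this file; for reference, the clipped PPO
    # policy objective such a trainer would minimize is (standard PPO, sketch only):
    #   ratio = exp(log_pi_new(a|s) - log_pi_old(a|s))
    #   L = -E[ min(ratio * A, clip(ratio, 1 - clip_param, 1 + clip_param) * A) ]
    #       - entropy_coef * H(pi)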
| 2,234 | 38.210526 | 147 | py |
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_module.py | import torch
from tmarl.networks.policy_network import PolicyNetwork
class MAPPOModule:
def __init__(self, args, obs_space, share_obs_space, act_space, device=torch.device("cpu")):
self.device = device
self.lr = args.lr
self.critic_lr = args.critic_lr
self.opti_eps = args.opti_eps
self.weight_decay = args.weight_decay
self.obs_space = obs_space
self.share_obs_space = share_obs_space
self.act_space = act_space
self.actor = PolicyNetwork(args, self.obs_space, self.act_space, self.device)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr, eps=self.opti_eps, weight_decay=self.weight_decay)
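        # Note: only the actor network is built here; a critic is unnecessary
        # for evaluation-only rollouts, which is why get_actions below returns
        # None in the value and critic-state slots.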
def get_actions(self, share_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None, deterministic=False):
actions, action_log_probs, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)
return None, actions, action_log_probs, rnn_states_actor, None | 1,050 | 41.04 | 135 | py |
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/loggers/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import time
def timer(function):
"""
    Decorator that reports a function's wall-clock running time.
    :param function: the function to be timed
    :return: the wrapped function
"""
def wrapper(*args, **kwargs):
time_start = time.time()
res = function(*args, **kwargs)
cost_time = time.time() - time_start
print("{} running time: {}s".format(function.__name__, cost_time))
return res
return wrapper | 1,011 | 27.914286 | 74 | py |
TiKick | TiKick-main/tmarl/loggers/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/loggers/TSee/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/replay_buffers/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/replay_buffers/normal/shared_buffer.py | import torch
import numpy as np
from collections import defaultdict
from tmarl.utils.util import check,get_shape_from_obs_space, get_shape_from_act_space
def _flatten(T, N, x):
return x.reshape(T * N, *x.shape[2:])
def _cast(x):
return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:])
class SharedReplayBuffer(object):
def __init__(self, args, num_agents, obs_space, share_obs_space, act_space):
self.episode_length = args.episode_length
self.n_rollout_threads = args.n_rollout_threads
self.hidden_size = args.hidden_size
self.recurrent_N = args.recurrent_N
self.gamma = args.gamma
self.gae_lambda = args.gae_lambda
self._use_gae = args.use_gae
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_proper_time_limits = args.use_proper_time_limits
self._mixed_obs = False # for mixed observation
obs_shape = get_shape_from_obs_space(obs_space)
share_obs_shape = get_shape_from_obs_space(share_obs_space)
# for mixed observation
if 'Dict' in obs_shape.__class__.__name__:
self._mixed_obs = True
self.obs = {}
self.share_obs = {}
for key in obs_shape:
self.obs[key] = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape[key].shape), dtype=np.float32)
for key in share_obs_shape:
self.share_obs[key] = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape[key].shape), dtype=np.float32)
else:
# deal with special attn format
if type(obs_shape[-1]) == list:
obs_shape = obs_shape[:1]
if type(share_obs_shape[-1]) == list:
share_obs_shape = share_obs_shape[:1]
self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape), dtype=np.float32)
self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32)
self.rnn_states = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
self.rnn_states_critic = np.zeros_like(self.rnn_states)
self.value_preds = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.returns = np.zeros_like(self.value_preds)
if act_space.__class__.__name__ == 'Discrete':
self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_space.n), dtype=np.float32)
else:
self.available_actions = None
act_shape = get_shape_from_act_space(act_space)
self.actions = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
self.action_log_probs = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
self.rewards = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.bad_masks = np.ones_like(self.masks)
self.active_masks = np.ones_like(self.masks)
self.step = 0
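    # Storage layout: observation-like arrays use
    # [episode_length + 1, n_rollout_threads, num_agents, *shape] (the extra
    # slot holds the bootstrap step), while per-step arrays such as actions,
    # rewards and action_log_probs use [episode_length, ...] without it.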
def insert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
if self._mixed_obs:
for key in self.share_obs.keys():
self.share_obs[key][self.step + 1] = share_obs[key].copy()
for key in self.obs.keys():
self.obs[key][self.step + 1] = obs[key].copy()
else:
self.share_obs[self.step + 1] = share_obs.copy()
self.obs[self.step + 1] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step + 1] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step + 1] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def init_buffer(self,share_obs,obs):
self.share_obs[0] = share_obs
self.obs[0] = obs
def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
self.share_obs[self.step] = share_obs.copy()
self.obs[self.step] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def after_update(self):
if self._mixed_obs:
for key in self.share_obs.keys():
self.share_obs[key][0] = self.share_obs[key][-1].copy()
for key in self.obs.keys():
self.obs[key][0] = self.obs[key][-1].copy()
else:
self.share_obs[0] = self.share_obs[-1].copy()
self.obs[0] = self.obs[-1].copy()
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
self.active_masks[0] = self.active_masks[-1].copy()
if self.available_actions is not None:
self.available_actions[0] = self.available_actions[-1].copy()
def chooseafter_update(self):
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
def compute_returns(self, next_value, value_normalizer=None):
if self._use_proper_time_limits:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
# step + 1
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * gae * self.masks[step + 1]
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step])
else:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * self.value_preds[step]
else:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]
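    # GAE recursion used above (masks m zero out terminated episodes):
    #   delta_t = r_t + gamma * V(s_{t+1}) * m_{t+1} - V(s_t)
    #   A_t     = delta_t + gamma * lambda * m_{t+1} * A_{t+1}
    #   R_t     = A_t + V(s_t)
    # When PopArt/ValueNorm is enabled, V is denormalized before entering the
    # recursion; bad_masks additionally cut the recursion at time-limit resets.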
def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) * number of agents ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(n_rollout_threads, episode_length, num_agents, n_rollout_threads * episode_length * num_agents,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
rand = torch.randperm(batch_size).numpy()
sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]
if self._mixed_obs:
share_obs = {}
obs = {}
for key in self.share_obs.keys():
share_obs[key] = self.share_obs[key][:-1].reshape(-1, *self.share_obs[key].shape[3:])
for key in self.obs.keys():
obs[key] = self.obs[key][:-1].reshape(-1, *self.obs[key].shape[3:])
else:
share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:])
rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])
value_preds = self.value_preds[:-1].reshape(-1, 1)
returns = self.returns[:-1].reshape(-1, 1)
masks = self.masks[:-1].reshape(-1, 1)
active_masks = self.active_masks[:-1].reshape(-1, 1)
action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, 1)
for indices in sampler:
# obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim]
if self._mixed_obs:
share_obs_batch = {}
obs_batch = {}
for key in share_obs.keys():
share_obs_batch[key] = share_obs[key][indices]
for key in obs.keys():
obs_batch[key] = obs[key][indices]
else:
share_obs_batch = share_obs[indices]
obs_batch = obs[indices]
rnn_states_batch = rnn_states[indices]
rnn_states_critic_batch = rnn_states_critic[indices]
actions_batch = actions[indices]
if self.available_actions is not None:
available_actions_batch = available_actions[indices]
else:
available_actions_batch = None
value_preds_batch = value_preds[indices]
return_batch = returns[indices]
masks_batch = masks[indices]
active_masks_batch = active_masks[indices]
old_action_log_probs_batch = action_log_probs[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages[indices]
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
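    # Illustrative consumption of the generator (sketch, ignoring value
    # normalization when computing advantages):
    #   advantages = buffer.returns[:-1] - buffer.value_preds[:-1]
    #   for sample in buffer.feed_forward_generator(advantages, num_mini_batch=4):
    #       share_obs, obs, rnn_states, rnn_states_critic, actions, value_preds, \
    #           returns, masks, active_masks, old_log_probs, adv_targ, avail = sample
    #       ...  # one PPO update per mini-batch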
def naive_recurrent_generator(self, advantages, num_mini_batch):
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads*num_agents
assert n_rollout_threads*num_agents >= num_mini_batch, (
"PPO requires the number of processes ({})* number of agents ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(n_rollout_threads, num_agents, num_mini_batch))
num_envs_per_batch = batch_size // num_mini_batch
perm = torch.randperm(batch_size).numpy()
if self._mixed_obs:
share_obs = {}
obs = {}
for key in self.share_obs.keys():
share_obs[key] = self.share_obs[key].reshape(-1, batch_size, *self.share_obs[key].shape[3:])
for key in self.obs.keys():
obs[key] = self.obs[key].reshape(-1, batch_size, *self.obs[key].shape[3:])
else:
share_obs = self.share_obs.reshape(-1, batch_size, *self.share_obs.shape[3:])
obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:])
rnn_states = self.rnn_states.reshape(-1, batch_size, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic.reshape(-1, batch_size, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions.reshape(-1, batch_size, self.available_actions.shape[-1])
value_preds = self.value_preds.reshape(-1, batch_size, 1)
returns = self.returns.reshape(-1, batch_size, 1)
masks = self.masks.reshape(-1, batch_size, 1)
active_masks = self.active_masks.reshape(-1, batch_size, 1)
action_log_probs = self.action_log_probs.reshape(-1, batch_size, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, batch_size, 1)
for start_ind in range(0, batch_size, num_envs_per_batch):
if self._mixed_obs:
share_obs_batch = defaultdict(list)
obs_batch = defaultdict(list)
else:
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
if self._mixed_obs:
for key in share_obs.keys():
share_obs_batch[key].append(share_obs[key][:-1, ind])
for key in obs.keys():
obs_batch[key].append(obs[key][:-1, ind])
else:
share_obs_batch.append(share_obs[:-1, ind])
obs_batch.append(obs[:-1, ind])
rnn_states_batch.append(rnn_states[0:1, ind])
rnn_states_critic_batch.append(rnn_states_critic[0:1, ind])
actions_batch.append(actions[:, ind])
if self.available_actions is not None:
available_actions_batch.append(available_actions[:-1, ind])
value_preds_batch.append(value_preds[:-1, ind])
return_batch.append(returns[:-1, ind])
masks_batch.append(masks[:-1, ind])
active_masks_batch.append(active_masks[:-1, ind])
old_action_log_probs_batch.append(action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
# [N[T, dim]]
T, N = self.episode_length, num_envs_per_batch
# These are all from_numpys of size (T, N, -1)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = np.stack(share_obs_batch[key], 1)
for key in obs_batch.keys():
obs_batch[key] = np.stack(obs_batch[key], 1)
else:
share_obs_batch = np.stack(share_obs_batch, 1)
obs_batch = np.stack(obs_batch, 1)
actions_batch = np.stack(actions_batch, 1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, 1)
value_preds_batch = np.stack(value_preds_batch, 1)
return_batch = np.stack(return_batch, 1)
masks_batch = np.stack(masks_batch, 1)
active_masks_batch = np.stack(active_masks_batch, 1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)
adv_targ = np.stack(adv_targ, 1)
# States is just a (N, dim) from_numpy [N[1,dim]]
rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
# Flatten the (T, N, ...) from_numpys to (T * N, ...)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = _flatten(T, N, share_obs_batch[key])
for key in obs_batch.keys():
obs_batch[key] = _flatten(T, N, obs_batch[key])
else:
share_obs_batch = _flatten(T, N, share_obs_batch)
obs_batch = _flatten(T, N, obs_batch)
actions_batch = _flatten(T, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(T, N, available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(T, N, value_preds_batch)
return_batch = _flatten(T, N, return_batch)
masks_batch = _flatten(T, N, masks_batch)
active_masks_batch = _flatten(T, N, active_masks_batch)
old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)
adv_targ = _flatten(T, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length):
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
data_chunks = batch_size // data_chunk_length # [C=r*T*M/L]
mini_batch_size = data_chunks // num_mini_batch
        assert n_rollout_threads * episode_length * num_agents >= data_chunk_length, (
            "PPO requires the number of processes ({}) * number of agents ({}) * episode length ({}) "
            "to be greater than or equal to "
            "the data chunk length ({}).".format(n_rollout_threads, num_agents, episode_length, data_chunk_length))
rand = torch.randperm(data_chunks).numpy()
sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]
if self._mixed_obs:
share_obs = {}
obs = {}
for key in self.share_obs.keys():
if len(self.share_obs[key].shape) == 6:
share_obs[key] = self.share_obs[key][:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs[key].shape[3:])
elif len(self.share_obs[key].shape) == 5:
share_obs[key] = self.share_obs[key][:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.share_obs[key].shape[3:])
else:
share_obs[key] = _cast(self.share_obs[key][:-1])
for key in self.obs.keys():
if len(self.obs[key].shape) == 6:
obs[key] = self.obs[key][:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs[key].shape[3:])
elif len(self.obs[key].shape) == 5:
obs[key] = self.obs[key][:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.obs[key].shape[3:])
else:
obs[key] = _cast(self.obs[key][:-1])
else:
if len(self.share_obs.shape) > 4:
share_obs = self.share_obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs.shape[3:])
else:
share_obs = _cast(self.share_obs[:-1])
obs = _cast(self.obs[:-1])
actions = _cast(self.actions)
action_log_probs = _cast(self.action_log_probs)
advantages = _cast(advantages)
value_preds = _cast(self.value_preds[:-1])
returns = _cast(self.returns[:-1])
masks = _cast(self.masks[:-1])
active_masks = _cast(self.active_masks[:-1])
# rnn_states = _cast(self.rnn_states[:-1])
# rnn_states_critic = _cast(self.rnn_states_critic[:-1])
rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states_critic.shape[3:])
if self.available_actions is not None:
available_actions = _cast(self.available_actions[:-1])
for indices in sampler:
if self._mixed_obs:
share_obs_batch = defaultdict(list)
obs_batch = defaultdict(list)
else:
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for index in indices:
ind = index * data_chunk_length
# size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim]
if self._mixed_obs:
for key in share_obs.keys():
share_obs_batch[key].append(share_obs[key][ind:ind+data_chunk_length])
for key in obs.keys():
obs_batch[key].append(obs[key][ind:ind+data_chunk_length])
else:
share_obs_batch.append(share_obs[ind:ind+data_chunk_length])
obs_batch.append(obs[ind:ind+data_chunk_length])
actions_batch.append(actions[ind:ind+data_chunk_length])
if self.available_actions is not None:
available_actions_batch.append(available_actions[ind:ind+data_chunk_length])
value_preds_batch.append(value_preds[ind:ind+data_chunk_length])
return_batch.append(returns[ind:ind+data_chunk_length])
masks_batch.append(masks[ind:ind+data_chunk_length])
active_masks_batch.append(active_masks[ind:ind+data_chunk_length])
old_action_log_probs_batch.append(action_log_probs[ind:ind+data_chunk_length])
adv_targ.append(advantages[ind:ind+data_chunk_length])
# size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim]
rnn_states_batch.append(rnn_states[ind])
rnn_states_critic_batch.append(rnn_states_critic[ind])
L, N = data_chunk_length, mini_batch_size
# These are all from_numpys of size (L, N, Dim)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = np.stack(share_obs_batch[key], axis=1)
for key in obs_batch.keys():
obs_batch[key] = np.stack(obs_batch[key], axis=1)
else:
share_obs_batch = np.stack(share_obs_batch, axis=1)
obs_batch = np.stack(obs_batch, axis=1)
actions_batch = np.stack(actions_batch, axis=1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, axis=1)
value_preds_batch = np.stack(value_preds_batch, axis=1)
return_batch = np.stack(return_batch, axis=1)
masks_batch = np.stack(masks_batch, axis=1)
active_masks_batch = np.stack(active_masks_batch, axis=1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch, axis=1)
adv_targ = np.stack(adv_targ, axis=1)
# States is just a (N, -1) from_numpy
rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
# Flatten the (L, N, ...) from_numpys to (L * N, ...)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = _flatten(L, N, share_obs_batch[key])
for key in obs_batch.keys():
obs_batch[key] = _flatten(L, N, obs_batch[key])
else:
share_obs_batch = _flatten(L, N, share_obs_batch)
obs_batch = _flatten(L, N, obs_batch)
actions_batch = _flatten(L, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(L, N, available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(L, N, value_preds_batch)
return_batch = _flatten(L, N, return_batch)
masks_batch = _flatten(L, N, masks_batch)
active_masks_batch = _flatten(L, N, active_masks_batch)
old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)
adv_targ = _flatten(L, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
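    # Chunking sketch: the rollout data, flattened to N*M*T steps, is cut into
    # chunks of data_chunk_length; a mini-batch is a random set of such chunks,
    # and the RNN state stored at the first step of each chunk is used to
    # re-run the recurrent policy over the chunk during the update.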
| 28,769 | 52.081181 | 231 | py |
TiKick | TiKick-main/tmarl/replay_buffers/normal/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/configs/config.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import argparse
def get_config():
parser = argparse.ArgumentParser(
description='TiKick', formatter_class=argparse.RawDescriptionHelpFormatter)
# prepare parameters
parser.add_argument("--algorithm_name", type=str,
default='rmappo', choices=["rmappo"])
parser.add_argument("--experiment_name", type=str, default="check",
help="an identifier to distinguish different experiment.")
parser.add_argument("--seed", type=int, default=1,
help="Random seed for numpy/torch")
parser.add_argument("--disable_cuda", action='store_true', default=False,
help="by default False, will use GPU to train; or else will use CPU;")
parser.add_argument("--cuda_deterministic",
action='store_false', default=True,
help="by default, make sure random seed effective. if set, bypass such function.")
parser.add_argument("--n_rollout_threads", type=int, default=2,
help="Number of parallel envs for training rollout")
parser.add_argument("--n_eval_rollout_threads", type=int, default=1,
help="Number of parallel envs for evaluating rollout")
parser.add_argument("--n_render_rollout_threads", type=int, default=1,
help="Number of parallel envs for rendering rollout")
parser.add_argument("--eval_num", type=int, default=1,
help='Number of environment steps to evaluate (default: 1)')
# env parameters
parser.add_argument("--env_name", type=str, default='StarCraft2',
help="specify the name of environment")
parser.add_argument("--use_obs_instead_of_state", action='store_true',
default=False, help="Whether to use global state or concatenated obs")
# replay buffer parameters
parser.add_argument("--episode_length", type=int,
default=200, help="Max length for any episode")
# network parameters
parser.add_argument("--separate_policy", action='store_true',
                        default=False, help='Whether agents use separate policies')
parser.add_argument("--use_centralized_V", action='store_false',
default=True, help="Whether to use centralized V function")
parser.add_argument("--use_conv1d", action='store_true',
default=False, help="Whether to use conv1d")
parser.add_argument("--stacked_frames", type=int, default=1,
help="Dimension of hidden layers for actor/critic networks")
parser.add_argument("--use_stacked_frames", action='store_true',
default=False, help="Whether to use stacked_frames")
parser.add_argument("--hidden_size", type=int, default=256,
help="Dimension of hidden layers for actor/critic networks") # TODO @zoeyuchao. The same comment might in need of change.
parser.add_argument("--layer_N", type=int, default=3,
help="Number of layers for actor/critic networks")
parser.add_argument("--activation_id", type=int,
default=1, help="choose 0 to use tanh, 1 to use relu, 2 to use leaky relu, 3 to use elu")
parser.add_argument("--use_popart", action='store_true', default=False,
help="by default False, use PopArt to normalize rewards.")
parser.add_argument("--use_valuenorm", action='store_false', default=True,
help="by default True, use running mean and std to normalize rewards.")
parser.add_argument("--use_feature_normalization", action='store_false',
default=True, help="Whether to apply layernorm to the inputs")
parser.add_argument("--use_orthogonal", action='store_false', default=True,
help="Whether to use Orthogonal initialization for weights and 0 initialization for biases")
parser.add_argument("--gain", type=float, default=0.01,
help="The gain # of last action layer")
parser.add_argument("--cnn_layers_params", type=str, default=None,
help="The parameters of cnn layer")
parser.add_argument("--use_maxpool2d", action='store_true',
default=False, help="Whether to apply layernorm to the inputs")
# recurrent parameters
parser.add_argument("--use_naive_recurrent_policy", action='store_true',
default=False, help='Whether to use a naive recurrent policy')
parser.add_argument("--use_recurrent_policy", action='store_false',
default=True, help='use a recurrent policy')
parser.add_argument("--recurrent_N", type=int, default=1,
help="The number of recurrent layers.")
parser.add_argument("--data_chunk_length", type=int, default=25,
help="Time length of chunks used to train a recurrent_policy")
parser.add_argument("--use_influence_policy", action='store_true',
                        default=False, help='whether to use the influence policy')
parser.add_argument("--influence_layer_N", type=int, default=1,
help="Number of layers for actor/critic networks")
# optimizer parameters
parser.add_argument("--lr", type=float, default=5e-4,
help='learning rate (default: 5e-4)')
parser.add_argument("--tau", type=float, default=0.995,
help='soft update polyak (default: 0.995)')
parser.add_argument("--critic_lr", type=float, default=5e-4,
help='critic learning rate (default: 5e-4)')
parser.add_argument("--opti_eps", type=float, default=1e-5,
help='RMSprop optimizer epsilon (default: 1e-5)')
parser.add_argument("--weight_decay", type=float, default=0)
# ppo parameters
parser.add_argument("--ppo_epoch", type=int, default=15,
help='number of ppo epochs (default: 15)')
parser.add_argument("--use_policy_vhead",
action='store_true', default=False,
help="by default, do not use policy vhead. if set, use policy vhead.")
parser.add_argument("--use_clipped_value_loss",
action='store_false', default=True,
help="by default, clip loss value. If set, do not clip loss value.")
parser.add_argument("--clip_param", type=float, default=0.2,
help='ppo clip parameter (default: 0.2)')
parser.add_argument("--num_mini_batch", type=int, default=1,
help='number of batches for ppo (default: 1)')
parser.add_argument("--policy_value_loss_coef", type=float,
                        default=1, help='policy value loss coefficient (default: 1)')
parser.add_argument("--entropy_coef", type=float, default=0.01,
help='entropy term coefficient (default: 0.01)')
parser.add_argument("--value_loss_coef", type=float,
                        default=1, help='value loss coefficient (default: 1)')
parser.add_argument("--use_max_grad_norm",
action='store_false', default=True,
help="by default, use max norm of gradients. If set, do not use.")
parser.add_argument("--max_grad_norm", type=float, default=10.0,
                        help='max norm of gradients (default: 10.0)')
parser.add_argument("--use_gae", action='store_false',
default=True, help='use generalized advantage estimation')
parser.add_argument("--gamma", type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument("--gae_lambda", type=float, default=0.95,
help='gae lambda parameter (default: 0.95)')
parser.add_argument("--use_proper_time_limits", action='store_true',
default=False, help='compute returns taking into account time limits')
parser.add_argument("--use_huber_loss", action='store_false', default=True,
help="by default, use huber loss. If set, do not use huber loss.")
parser.add_argument("--use_value_active_masks",
action='store_false', default=True,
help="by default True, whether to mask useless data in value loss.")
parser.add_argument("--use_policy_active_masks",
action='store_false', default=True,
help="by default True, whether to mask useless data in policy loss.")
parser.add_argument("--huber_delta", type=float,
default=10.0, help=" coefficience of huber loss.")
# save parameters
parser.add_argument("--save_interval", type=int, default=1,
help="time duration between contiunous twice models saving.")
# log parameters
parser.add_argument("--log_interval", type=int, default=5,
help="time duration between contiunous twice log printing.")
# eval parameters
parser.add_argument("--use_eval", action='store_true', default=False,
help="by default, do not start evaluation. If set`, start evaluation alongside with training.")
parser.add_argument("--eval_interval", type=int, default=25,
help="time duration between contiunous twice evaluation progress.")
parser.add_argument("--eval_episodes", type=int, default=64,
help="number of episodes of a single evaluation.")
# pretrained parameters
parser.add_argument("--model_dir", type=str, default=None,
help="by default None. set the path to pretrained model.")
parser.add_argument("--replay_save_dir", type=str, default=None,
help="replay file save dir")
# replay buffer parameters
return parser
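# Typical usage (this mirrors how the runners in this repo consume the parser):
#   parser = get_config()
#   all_args = parser.parse_known_args(sys.argv[1:])[0]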
| 10,665 | 55.734043 | 146 | py |
TiKick | TiKick-main/tmarl/configs/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
| 638 | 34.5 | 74 | py |
TiKick | TiKick-main/tmarl/wrappers/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/wrappers/TWrapper/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/runners/base_evaluator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import random
import numpy as np
import torch
from tmarl.configs.config import get_config
from tmarl.runners.base_runner import Runner
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
class Evaluator(Runner):
def __init__(self, argv,program_type=None, client=None):
super().__init__(argv)
parser = get_config()
all_args = self.extra_args_func(argv, parser)
all_args.cuda = not all_args.disable_cuda
self.algorithm_name = all_args.algorithm_name
# cuda
if not all_args.disable_cuda and torch.cuda.is_available():
device = torch.device("cuda:0")
if all_args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
else:
print("choose to use cpu...")
device = torch.device("cpu")
# run dir
run_dir = self.setup_run_dir(all_args)
# env init
Env_Class, SubprocVecEnv, DummyVecEnv = self.get_env()
eval_envs = self.env_init(
all_args, Env_Class, SubprocVecEnv, DummyVecEnv)
num_agents = all_args.num_agents
config = {
"all_args": all_args,
"envs": None,
"eval_envs": eval_envs,
"num_agents": num_agents,
"device": device,
"run_dir": run_dir,
}
self.all_args, self.envs, self.eval_envs, self.config \
= all_args, None, eval_envs, config
self.driver = self.init_driver()
def run(self):
# run experiments
self.driver.run()
self.stop()
def stop(self):
pass
def extra_args_func(self, argv, parser):
raise NotImplementedError
def get_env(self):
raise NotImplementedError
def init_driver(self):
raise NotImplementedError
def make_eval_env(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
def get_env_fn(rank):
def init_env():
env = Env_Class(all_args)
env.seed(all_args.seed * 50000 + rank * 10000)
return env
return init_env
if all_args.n_eval_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])
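    # Each rollout thread gets a distinct deterministic seed
    # (seed * 50000 + rank * 10000), so parallel evaluation environments do
    # not share random-number streams.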
def env_init(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
eval_envs = self.make_eval_env(
all_args, Env_Class, SubprocVecEnv, DummyVecEnv) if all_args.use_eval else None
return eval_envs
def setup_run_dir(self, all_args):
return None
| 3,402 | 28.08547 | 97 | py |
TiKick | TiKick-main/tmarl/runners/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/runners/base_runner.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import os
import random
import socket
import setproctitle
import numpy as np
from pathlib import Path
import torch
from tmarl.configs.config import get_config
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
class Runner:
def __init__(self, argv):
self.argv = argv
def run(self):
# main run
raise NotImplementedError | 1,079 | 22.478261 | 74 | py |
TiKick | TiKick-main/tmarl/runners/football/football_evaluator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import sys
import os
from pathlib import Path
from tmarl.runners.base_evaluator import Evaluator
from tmarl.envs.football.football import RllibGFootball
from tmarl.envs.env_wrappers import ShareSubprocVecEnv, ShareDummyVecEnv
class FootballEvaluator(Evaluator):
def __init__(self, argv):
super(FootballEvaluator, self).__init__(argv)
def setup_run_dir(self, all_args):
dump_dir = Path(all_args.replay_save_dir)
if not dump_dir.exists():
os.makedirs(str(dump_dir))
self.dump_dir = dump_dir
return super(FootballEvaluator, self).setup_run_dir(all_args)
def make_eval_env(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
def get_env_fn(rank):
def init_env():
env = Env_Class(all_args, rank, log_dir=str(self.dump_dir), isEval=True)
env.seed(all_args.seed * 50000 + rank * 10000)
return env
return init_env
if all_args.n_eval_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])
def extra_args_func(self, args, parser):
parser.add_argument('--scenario_name', type=str,
default='simple_spread', help="Which scenario to run on")
parser.add_argument('--num_agents', type=int,
default=0, help="number of players")
# football config
parser.add_argument('--representation', type=str,
default='raw', help="format of the observation in gfootball env")
parser.add_argument('--rewards', type=str,
default='scoring', help="format of the reward in gfootball env")
parser.add_argument("--render_only", action='store_true', default=False,
help="if ture, render without training")
all_args = parser.parse_known_args(args)[0]
return all_args
def get_env(self):
return RllibGFootball, ShareSubprocVecEnv, ShareDummyVecEnv
def init_driver(self):
if not self.all_args.separate_policy:
from tmarl.drivers.shared_distributed.football_driver import FootballDriver as Driver
else:
raise NotImplementedError
driver = Driver(self.config)
return driver
def main(argv):
evaluator = FootballEvaluator(argv)
evaluator.run()
if __name__ == "__main__":
main(sys.argv[1:])
| 3,181 | 34.355556 | 97 | py |
TiKick | TiKick-main/tmarl/utils/multi_discrete.py | import gym
import numpy as np
# An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)
# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)
class MultiDiscrete(gym.Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different parameters
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action space where the discrete action space can take any integers from `min` to `max` (both inclusive)
    Note: A value of 0 always needs to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
"""
def __init__(self, array_of_param_array):
self.low = np.array([x[0] for x in array_of_param_array])
self.high = np.array([x[1] for x in array_of_param_array])
self.num_discrete_space = self.low.shape[0]
self.n = np.sum(self.high) + 2
def sample(self):
""" Returns a array with one sample from each discrete action space """
# For each row: round(random .* (max - min) + min, 0)
random_array = np.random.rand(self.num_discrete_space)
return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
def contains(self, x):
return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()
@property
def shape(self):
return self.num_discrete_space
def __repr__(self):
return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
| 2,346 | 50.021739 | 198 | py |
TiKick | TiKick-main/tmarl/utils/valuenorm.py |
import numpy as np
import torch
import torch.nn as nn
class ValueNorm(nn.Module):
""" Normalize a vector of observations - across the first norm_axes dimensions"""
def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device("cpu")):
super(ValueNorm, self).__init__()
self.input_shape = input_shape
self.norm_axes = norm_axes
self.epsilon = epsilon
self.beta = beta
self.per_element_update = per_element_update
self.tpdv = dict(dtype=torch.float32, device=device)
self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)
self.reset_parameters()
def reset_parameters(self):
self.running_mean.zero_()
self.running_mean_sq.zero_()
self.debiasing_term.zero_()
def running_mean_var(self):
debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon)
debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
return debiased_mean, debiased_var
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
if self.per_element_update:
batch_size = np.prod(input_vector.size()[:self.norm_axes])
weight = self.beta ** batch_size
else:
weight = self.beta
self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))
self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))
self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))
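    # Debiasing sketch: running_mean is a beta-weighted EMA, so after t updates
    # it underestimates the true mean by a factor (1 - beta^t); debiasing_term
    # tracks exactly that factor (the same EMA applied to the constant 1), and
    # running_mean_var() divides it out.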
def normalize(self, input_vector):
# Make sure input is float32
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.running_mean_var()
out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
return out
def denormalize(self, input_vector):
""" Transform normalized data back into original distribution """
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.running_mean_var()
out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
out = out.cpu().numpy()
return out
| 3,110 | 37.8875 | 131 | py |
TiKick | TiKick-main/tmarl/utils/util.py |
import copy
import numpy as np
import math
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.autograd import Variable
from gym.spaces import Box, Discrete, Tuple
def check(input):
    # Convert numpy arrays to torch tensors; leave everything else untouched.
    # (The original returned None for non-ndarray inputs, which was a bug.)
    output = torch.from_numpy(input) if type(input) == np.ndarray else input
    return output
def get_gard_norm(it):
sum_grad = 0
for x in it:
if x.grad is None:
continue
sum_grad += x.grad.norm() ** 2
return math.sqrt(sum_grad)
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
"""Decreases the learning rate linearly"""
lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def huber_loss(e, d):
a = (abs(e) <= d).float()
b = (e > d).float()
return a*e**2/2 + b*d*(abs(e)-d/2)
def mse_loss(e):
return e**2/2
def get_shape_from_obs_space(obs_space):
if obs_space.__class__.__name__ == 'Box':
obs_shape = obs_space.shape
elif obs_space.__class__.__name__ == 'list':
obs_shape = obs_space
elif obs_space.__class__.__name__ == 'Dict':
obs_shape = obs_space.spaces
else:
raise NotImplementedError
return obs_shape
def get_shape_from_act_space(act_space):
if act_space.__class__.__name__ == 'Discrete':
act_shape = 1
elif act_space.__class__.__name__ == "MultiDiscrete":
act_shape = act_space.shape
elif act_space.__class__.__name__ == "Box":
act_shape = act_space.shape[0]
elif act_space.__class__.__name__ == "MultiBinary":
act_shape = act_space.shape[0]
else: # agar
act_shape = act_space[0].shape[0] + 1
return act_shape
def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N)/H))
img_nhwc = np.array(
list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
return img_Hh_Ww_c
def to_torch(input):
return torch.from_numpy(input) if type(input) == np.ndarray else input
def to_numpy(x):
return x.detach().cpu().numpy()
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample()
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
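# FixedCategorical adapts torch.distributions.Categorical so that log-probs of
# shape [batch] come back as [batch, 1], matching the buffer's
# action_log_probs layout, and adds a deterministic mode() for evaluation.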
class MultiDiscrete(gym.Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different parameters
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action space
where the discrete action space can take any integers from `min` to `max` (both inclusive)
    Note: A value of 0 always needs to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
"""
def __init__(self, array_of_param_array):
self.low = np.array([x[0] for x in array_of_param_array])
self.high = np.array([x[1] for x in array_of_param_array])
self.num_discrete_space = self.low.shape[0]
self.n = np.sum(self.high) + 2
def sample(self):
""" Returns a array with one sample from each discrete action space """
# For each row: round(random .* (max - min) + min, 0)
random_array = np.random.rand(self.num_discrete_space)
return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
def contains(self, x):
return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()
@property
def shape(self):
return self.num_discrete_space
def __repr__(self):
return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
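# Illustrative usage sketch, not part of the original file: the Nintendo-controller
# space from the docstring above; sample() returns one integer per sub-space.
def _multidiscrete_example():
    space = MultiDiscrete([[0, 4], [0, 1], [0, 1]])
    action = space.sample()  # e.g. [3, 0, 1]
    assert len(action) == space.num_discrete_space
    assert space.contains(action)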
class DecayThenFlatSchedule():
def __init__(self,
start,
finish,
time_length,
decay="exp"):
self.start = start
self.finish = finish
self.time_length = time_length
self.delta = (self.start - self.finish) / self.time_length
self.decay = decay
if self.decay in ["exp"]:
self.exp_scaling = (-1) * self.time_length / \
np.log(self.finish) if self.finish > 0 else 1
def eval(self, T):
if self.decay in ["linear"]:
return max(self.finish, self.start - self.delta * T)
elif self.decay in ["exp"]:
return min(self.start, max(self.finish, np.exp(- T / self.exp_scaling)))
pass
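# Illustrative usage sketch, not part of the original file: a linear ramp from 1.0
# down to 0.05 over 100 steps, then flat at 0.05 afterwards.
def _decay_schedule_example():
    sched = DecayThenFlatSchedule(start=1.0, finish=0.05, time_length=100, decay="linear")
    assert abs(sched.eval(0) - 1.0) < 1e-8
    assert abs(sched.eval(50) - 0.525) < 1e-8   # halfway down the linear ramp
    assert abs(sched.eval(1000) - 0.05) < 1e-8  # clamped to `finish` after time_length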
def huber_loss(e, d):
    # Quadratic inside the band |e| <= d, linear outside (standard Huber loss).
    a = (abs(e) <= d).float()
    b = (abs(e) > d).float()
    return a*e**2/2 + b*d*(abs(e)-d/2)
def mse_loss(e):
return e**2
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L11
def soft_update(target, source, tau):
"""
Perform DDPG soft update (move target params toward source based on weight
factor tau)
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
tau (float, 0 < x < 1): Weight factor for update
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau)
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L15
def hard_update(target, source):
"""
Copy network parameters from source to target
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
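# Illustrative usage sketch, not part of the original file: keep a target network
# lagging behind an online network, as in DDPG-style training loops.
def _target_network_example():
    online = nn.Linear(4, 2)
    target = nn.Linear(4, 2)
    hard_update(target, online)            # exact copy once at initialization
    soft_update(target, online, tau=0.01)  # then slow tracking after each update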
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def average_gradients(model):
""" Gradient averaging. """
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)
param.grad.data /= size
def onehot_from_logits(logits, avail_logits=None, eps=0.0):
"""
Given batch of logits, return one-hot sample using epsilon greedy strategy
(based on given epsilon)
"""
# get best (according to current policy) actions in one-hot form
logits = to_torch(logits)
dim = len(logits.shape) - 1
if avail_logits is not None:
avail_logits = to_torch(avail_logits)
logits[avail_logits == 0] = -1e10
argmax_acs = (logits == logits.max(dim, keepdim=True)[0]).float()
if eps == 0.0:
return argmax_acs
# get random actions in one-hot form
rand_acs = Variable(torch.eye(logits.shape[1])[[np.random.choice(
range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False)
# chooses between best and random actions using epsilon greedy
return torch.stack([argmax_acs[i] if r > eps else rand_acs[i] for i, r in
enumerate(torch.rand(logits.shape[0]))])
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
"""Sample from Gumbel(0, 1)"""
U = Variable(tens_type(*shape).uniform_(), requires_grad=False)
return -torch.log(-torch.log(U + eps) + eps)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax_sample(logits, avail_logits, temperature, device=torch.device('cpu')):
""" Draw a sample from the Gumbel-Softmax distribution"""
if str(device) == 'cpu':
y = logits + sample_gumbel(logits.shape, tens_type=type(logits.data))
else:
y = (logits.cpu() + sample_gumbel(logits.shape,
tens_type=type(logits.data))).cuda()
dim = len(logits.shape) - 1
if avail_logits is not None:
avail_logits = to_torch(avail_logits).to(device)
y[avail_logits == 0] = -1e10
return F.softmax(y / temperature, dim=dim)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax(logits, avail_logits=None, temperature=1.0, hard=False, device=torch.device('cpu')):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
      be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, avail_logits, temperature, device)
    if hard:
        # Straight-through estimator: the forward pass uses the one-hot sample,
        # while gradients flow through the soft sample y in the backward pass.
        y_hard = onehot_from_logits(y)
        y = (y_hard - y).detach() + y
return y
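# Illustrative usage sketch, not part of the original file: draw a differentiable
# one-hot sample from unnormalized logits with the straight-through estimator.
def _gumbel_softmax_example():
    logits = torch.randn(8, 5, requires_grad=True)
    action = gumbel_softmax(logits, temperature=0.5, hard=True)
    assert action.shape == (8, 5)
    assert torch.allclose(action.sum(dim=-1), torch.ones(8))  # one-hot rows
    action.sum().backward()  # gradients flow through the soft sample
    assert logits.grad is not None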
def gaussian_noise(shape, std):
return torch.empty(shape).normal_(mean=0, std=std)
def get_obs_shape(obs_space):
if obs_space.__class__.__name__ == "Box":
obs_shape = obs_space.shape
elif obs_space.__class__.__name__ == "list":
obs_shape = obs_space
else:
raise NotImplementedError
return obs_shape
def get_dim_from_space(space):
if isinstance(space, Box):
dim = space.shape[0]
elif isinstance(space, Discrete):
dim = space.n
elif isinstance(space, Tuple):
dim = sum([get_dim_from_space(sp) for sp in space])
elif "MultiDiscrete" in space.__class__.__name__:
return (space.high - space.low) + 1
elif isinstance(space, list):
dim = space[0]
else:
raise Exception("Unrecognized space: ", type(space))
return dim
def get_state_dim(observation_dict, action_dict):
combined_obs_dim = sum([get_dim_from_space(space)
for space in observation_dict.values()])
combined_act_dim = 0
for space in action_dict.values():
dim = get_dim_from_space(space)
if isinstance(dim, np.ndarray):
combined_act_dim += int(sum(dim))
else:
combined_act_dim += dim
return combined_obs_dim, combined_act_dim, combined_obs_dim+combined_act_dim
def get_cent_act_dim(action_space):
cent_act_dim = 0
for space in action_space:
dim = get_dim_from_space(space)
if isinstance(dim, np.ndarray):
cent_act_dim += int(sum(dim))
else:
cent_act_dim += dim
return cent_act_dim
def is_discrete(space):
if isinstance(space, Discrete) or "MultiDiscrete" in space.__class__.__name__:
return True
else:
return False
def is_multidiscrete(space):
if "MultiDiscrete" in space.__class__.__name__:
return True
else:
return False
def make_onehot(int_action, action_dim, seq_len=None):
if type(int_action) == torch.Tensor:
int_action = int_action.cpu().numpy()
if not seq_len:
return np.eye(action_dim)[int_action]
if seq_len:
onehot_actions = []
for i in range(seq_len):
onehot_action = np.eye(action_dim)[int_action[i]]
onehot_actions.append(onehot_action)
return np.stack(onehot_actions)
def avail_choose(x, avail_x=None):
x = to_torch(x)
if avail_x is not None:
avail_x = to_torch(avail_x)
x[avail_x == 0] = -1e10
return x # FixedCategorical(logits=x)
| 13,893 | 31.846336 | 122 | py |
TiKick | TiKick-main/tmarl/utils/segment_tree.py | import numpy as np
def unique(sorted_array):
"""
More efficient implementation of np.unique for sorted arrays
:param sorted_array: (np.ndarray)
:return:(np.ndarray) sorted_array without duplicate elements
"""
if len(sorted_array) == 1:
return sorted_array
left = sorted_array[:-1]
right = sorted_array[1:]
uniques = np.append(right != left, True)
return sorted_array[uniques]
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""
Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array that supports Index arrays, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
:param capacity: (int) Total size of the array - must be a power of two.
:param operation: (lambda (Any, Any): Any) operation for combining elements (eg. sum, max) must form a
mathematical group together with the set of possible values for array elements (i.e. be associative)
:param neutral_element: (Any) neutral element for the operation above. eg. float('-inf') for max and 0 for sum.
"""
assert capacity > 0 and capacity & (
capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
self.neutral_element = neutral_element
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(
mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""
Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
:param start: (int) beginning of the subsequence
:param end: (int) end of the subsequences
:return: (Any) result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# indexes of the leaf
idxs = idx + self._capacity
self._value[idxs] = val
if isinstance(idxs, int):
idxs = np.array([idxs])
# go up one level in the tree and remove duplicate indexes
idxs = unique(idxs // 2)
while len(idxs) > 1 or idxs[0] > 0:
# as long as there are non-zero indexes, update the corresponding values
self._value[idxs] = self._operation(
self._value[2 * idxs],
self._value[2 * idxs + 1]
)
# go up one level in the tree and remove duplicate indexes
idxs = unique(idxs // 2)
def __getitem__(self, idx):
assert np.max(idx) < self._capacity
assert 0 <= np.min(idx)
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=np.add,
neutral_element=0.0
)
self._value = np.array(self._value)
def sum(self, start=0, end=None):
"""
Returns arr[start] + ... + arr[end]
:param start: (int) start position of the reduction (must be >= 0)
:param end: (int) end position of the reduction (must be < len(arr), can be None for len(arr) - 1)
:return: (Any) reduction of SumSegmentTree
"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""
        Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum for each entry in prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
:param prefixsum: (np.ndarray) float upper bounds on the sum of array prefix
:return: (np.ndarray) highest indexes satisfying the prefixsum constraint
"""
if isinstance(prefixsum, float):
prefixsum = np.array([prefixsum])
assert 0 <= np.min(prefixsum)
assert np.max(prefixsum) <= self.sum() + 1e-5
assert isinstance(prefixsum[0], float)
idx = np.ones(len(prefixsum), dtype=int)
cont = np.ones(len(prefixsum), dtype=bool)
while np.any(cont): # while not all nodes are leafs
idx[cont] = 2 * idx[cont]
prefixsum_new = np.where(
self._value[idx] <= prefixsum, prefixsum - self._value[idx], prefixsum)
# prepare update of prefixsum for all right children
idx = np.where(np.logical_or(
self._value[idx] > prefixsum, np.logical_not(cont)), idx, idx + 1)
# Select child node for non-leaf nodes
prefixsum = prefixsum_new
# update prefixsum
cont = idx < self._capacity
# collect leafs
return idx - self._capacity
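# Illustrative usage sketch, not part of the original file: proportional sampling
# as used in prioritized replay buffers; capacity must be a power of two.
def _sum_tree_sampling_example():
    tree = SumSegmentTree(capacity=4)
    for i, priority in enumerate([1.0, 2.0, 3.0, 4.0]):
        tree[i] = priority
    assert tree.sum() == 10.0
    mass = np.random.uniform(0.0, tree.sum(), size=8)  # prefix-sum targets
    idxs = tree.find_prefixsum_idx(mass)               # indexes drawn in proportion to priority
    assert np.all((0 <= idxs) & (idxs < 4))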
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=np.minimum,
neutral_element=float('inf')
)
self._value = np.array(self._value)
def min(self, start=0, end=None):
"""
Returns min(arr[start], ..., arr[end])
:param start: (int) start position of the reduction (must be >= 0)
:param end: (int) end position of the reduction (must be < len(arr), can be None for len(arr) - 1)
:return: (Any) reduction of MinSegmentTree
"""
return super(MinSegmentTree, self).reduce(start, end)
| 6,859 | 40.325301 | 119 | py |
TiKick | TiKick-main/tmarl/utils/__init__.py | 0 | 0 | 0 | py |
|
TiKick | TiKick-main/tmarl/utils/gpu_mem_track.py | # code from https://github.com/Oldpan/Pytorch-Memory-Utils
import gc
import datetime
import inspect
import torch
import numpy as np
dtype_memory_size_dict = {
torch.float64: 64/8,
torch.double: 64/8,
torch.float32: 32/8,
torch.float: 32/8,
torch.float16: 16/8,
torch.half: 16/8,
torch.int64: 64/8,
torch.long: 64/8,
torch.int32: 32/8,
torch.int: 32/8,
torch.int16: 16/8,
    torch.short: 16/8,
torch.uint8: 8/8,
torch.int8: 8/8,
}
# compatibility of torch1.0
if getattr(torch, "bfloat16", None) is not None:
dtype_memory_size_dict[torch.bfloat16] = 16/8
if getattr(torch, "bool", None) is not None:
dtype_memory_size_dict[torch.bool] = 8/8 # pytorch use 1 byte for a bool, see https://github.com/pytorch/pytorch/issues/41571
def get_mem_space(x):
    try:
        ret = dtype_memory_size_dict[x]
    except KeyError:
        print(f"dtype {x} is not supported!")
        ret = 0  # fall back to zero bytes so tracking can continue on unknown dtypes
    return ret
class MemTracker(object):
"""
Class used to track pytorch memory usage
Arguments:
        detail(bool, default True): whether the function shows the detailed GPU memory usage
path(str): where to save log file
verbose(bool, default False): whether show the trivial exception
device(int): GPU number, default is 0
"""
def __init__(self, detail=True, path='', verbose=False, device=0):
self.print_detail = detail
self.last_tensor_sizes = set()
self.gpu_profile_fn = path + f'{datetime.datetime.now():%d-%b-%y-%H:%M:%S}-gpu_mem_track.txt'
self.verbose = verbose
self.begin = True
self.device = device
def get_tensors(self):
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
tensor = obj
else:
continue
if tensor.is_cuda:
yield tensor
except Exception as e:
if self.verbose:
                    print('A trivial exception occurred: {}'.format(e))
def get_tensor_usage(self):
sizes = [np.prod(np.array(tensor.size())) * get_mem_space(tensor.dtype) for tensor in self.get_tensors()]
return np.sum(sizes) / 1024**2
def get_allocate_usage(self):
return torch.cuda.memory_allocated() / 1024**2
def clear_cache(self):
gc.collect()
torch.cuda.empty_cache()
def print_all_gpu_tensor(self, file=None):
for x in self.get_tensors():
print(x.size(), x.dtype, np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2, file=file)
def track(self):
"""
Track the GPU memory usage
"""
frameinfo = inspect.stack()[1]
where_str = frameinfo.filename + ' line ' + str(frameinfo.lineno) + ': ' + frameinfo.function
with open(self.gpu_profile_fn, 'a+') as f:
if self.begin:
f.write(f"GPU Memory Track | {datetime.datetime.now():%d-%b-%y-%H:%M:%S} |"
f" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb"
f" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\n\n")
self.begin = False
if self.print_detail is True:
ts_list = [(tensor.size(), tensor.dtype) for tensor in self.get_tensors()]
new_tensor_sizes = {(type(x),
tuple(x.size()),
ts_list.count((x.size(), x.dtype)),
np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2,
x.dtype) for x in self.get_tensors()}
for t, s, n, m, data_type in new_tensor_sizes - self.last_tensor_sizes:
f.write(f'+ | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\n')
for t, s, n, m, data_type in self.last_tensor_sizes - new_tensor_sizes:
f.write(f'- | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\n')
self.last_tensor_sizes = new_tensor_sizes
f.write(f"\nAt {where_str:<50}"
f" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb"
f" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\n\n")
| 4,432 | 36.888889 | 129 | py |
TiKick | TiKick-main/tmarl/utils/modelsize_estimate.py | # code from https://github.com/Oldpan/Pytorch-Memory-Utils
import torch.nn as nn
import numpy as np
def modelsize(model, input, type_size=4):
para = sum([np.prod(list(p.size())) for p in model.parameters()])
# print('Model {} : Number of params: {}'.format(model._get_name(), para))
print('Model {} : params: {:4f}M'.format(model._get_name(), para * type_size / 1000 / 1000))
input_ = input.clone()
input_.requires_grad_(requires_grad=False)
mods = list(model.modules())
out_sizes = []
for i in range(1, len(mods)):
m = mods[i]
if isinstance(m, nn.ReLU):
if m.inplace:
continue
out = m(input_)
out_sizes.append(np.array(out.size()))
input_ = out
total_nums = 0
for i in range(len(out_sizes)):
s = out_sizes[i]
nums = np.prod(np.array(s))
total_nums += nums
# print('Model {} : Number of intermedite variables without backward: {}'.format(model._get_name(), total_nums))
# print('Model {} : Number of intermedite variables with backward: {}'.format(model._get_name(), total_nums*2))
print('Model {} : intermedite variables: {:3f} M (without backward)'
.format(model._get_name(), total_nums * type_size / 1000 / 1000))
print('Model {} : intermedite variables: {:3f} M (with backward)'
.format(model._get_name(), total_nums * type_size*2 / 1000 / 1000))
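# Illustrative usage sketch, not part of the original file: the estimator applies the
# modules one after another, so it is only meaningful for simple sequential models.
def _modelsize_example():
    import torch
    model = nn.Sequential(nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10))
    dummy_input = torch.randn(1, 128)
    modelsize(model, dummy_input)  # prints parameter and activation memory in MB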
| 1,428 | 34.725 | 116 | py |
TiKick | TiKick-main/scripts/football/replay2video.py | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script allowing to replay a given trace file.
Example usage:
    python replay2video.py --replay_file=/tmp/dumps/shutdown_20190521-165136974075.dump
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tmarl.envs.football.env import script_helpers
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('replay_file', None, 'replay file path')
flags.DEFINE_string('video_save_dir', '../../results/videos', 'video save dir')
flags.DEFINE_integer('fps', 10, 'How many frames per second to render')
flags.mark_flag_as_required('replay_file')
def main(_):
script_helpers.ScriptHelpers().replay(FLAGS.replay_file, FLAGS.fps,directory=FLAGS.video_save_dir)
if __name__ == '__main__':
app.run(main) | 1,389 | 31.325581 | 102 | py |
criterion.rs | criterion.rs-master/benches/benchmarks/external_process.py | import time
import sys
def fibonacci(n):
if n == 0 or n == 1:
return 1
return fibonacci(n - 1) + fibonacci(n - 2)
MILLIS = 1000
MICROS = MILLIS * 1000
NANOS = MICROS * 1000
def benchmark():
depth = int(sys.argv[1])
    # The Criterion.rs driver sends one iteration count per line on stdin.
    for line in sys.stdin:
iters = int(line.strip())
# Setup
start = time.perf_counter()
for x in range(iters):
fibonacci(depth)
end = time.perf_counter()
# Teardown
delta = end - start
nanos = int(delta * NANOS)
print("%d" % nanos)
sys.stdout.flush()
benchmark()
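# Illustrative driver sketch, not part of the original benchmark: the Criterion.rs
# harness writes one iteration count per line to this script's stdin and reads one
# elapsed-nanoseconds line back per batch. A minimal Python stand-in for that driver:
def _example_driver(depth=10, iters=1000):
    import subprocess
    proc = subprocess.Popen(
        [sys.executable, __file__, str(depth)],  # argv[1] selects the fibonacci depth
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
    proc.stdin.write("%d\n" % iters)    # request one timed batch of `iters` iterations
    proc.stdin.close()                  # EOF ends the benchmark loop cleanly
    return int(proc.stdout.readline())  # elapsed nanoseconds for the batch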
| 603 | 15.324324 | 46 | py |
RobDanns | RobDanns-main/deep_learning/setup.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Setup pycls."""
from setuptools import setup
setup(
name='pycls',
packages=['pycls']
)
| 443 | 22.368421 | 107 | py |
RobDanns | RobDanns-main/deep_learning/yaml_gen.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Generate yaml files for experiment configurations."""
import yaml
# import math
import os
import re
import argparse
import numpy as np
import shutil
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
dest='task',
        help='Generate configs for the given task, e.g., mlp_cifar10, cnn_cifar10, cnn_cifar100, cnn_imagenet, resnet18_tinyimagenet, resnet18_imagenet',
default='mlp_cifar10',
type=str
)
return parser.parse_args()
def makedirs_rm_exist(dir):
if os.path.isdir(dir):
shutil.rmtree(dir)
os.makedirs(dir, exist_ok=True)
def purge(dir, pattern):
for f in os.listdir(dir):
if re.search(pattern, f):
os.remove(os.path.join(dir, f))
def gen(dir_in, dir_out, fname_base, vars_label, vars_alias, vars_value):
'''Generate yaml files'''
with open(dir_in + fname_base + '.yaml') as f:
data_base = yaml.unsafe_load(f)
for vars in vars_value:
data = data_base.copy()
fname_new = fname_base
for id, var in enumerate(vars):
if vars_label[id][0] in data: # if key1 exist
data[vars_label[id][0]][vars_label[id][1]] = var
else:
data[vars_label[id][0]] = {vars_label[id][1]: var}
if vars_label[id][1] == 'TRANS_FUN':
var = var.split('_')[0]
fname_new += '_{}{}'.format(vars_alias[id], var)
with open(dir_out + fname_new + '.yaml', "w") as f:
yaml.dump(data, f, default_flow_style=False)
def gen_single(dir_in, dir_out, fname_base, vars_label, vars_alias, vars, comment='best'):
'''Generate yaml files for a single experiment'''
with open(dir_in + fname_base + '.yaml') as f:
data_base = yaml.unsafe_load(f)
data = data_base.copy()
fname_new = '{}_{}'.format(fname_base, comment)
for id, var in enumerate(vars):
if vars_label[id][0] in data: # if key1 exist
data[vars_label[id][0]][vars_label[id][1]] = var
else:
data[vars_label[id][0]] = {vars_label[id][1]: var}
with open(dir_out + fname_new + '.yaml', "w") as f:
yaml.dump(data, f, default_flow_style=False)
def grid2list(grid):
'''grid search to list'''
list_in = [[i] for i in grid[0]]
grid.pop(0)
for grid_temp in grid:
list_out = []
for val in grid_temp:
for list_temp in list_in:
list_out.append(list_temp + [val])
list_in = list_out
return list_in
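# Illustrative worked example, not part of the original file: grid2list expands a
# per-dimension grid into its cross product. Note that it mutates its argument via
# pop(0), so pass a copy if the grid is reused.
def _grid2list_example():
    out = grid2list([[1, 2], ['a', 'b']])
    assert out == [[1, 'a'], [2, 'a'], [1, 'b'], [2, 'b']]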
args = parse_args()
# Format for all experiments
# Note: many arguments are deprecated, they are kept to be consistent with existing experimental results
vars_value = []
vars_label = [['RESNET', 'TRANS_FUN'], ['RGRAPH', 'TALK_MODE'], ['RGRAPH', 'GROUP_NUM'],
['RGRAPH', 'MESSAGE_TYPE'], ['RGRAPH', 'SPARSITY'], ['RGRAPH', 'P'], ['RGRAPH', 'AGG_FUNC'],
['RGRAPH', 'SEED_GRAPH'], ['RGRAPH', 'SEED_TRAIN_START'], ['RGRAPH', 'SEED_TRAIN_END'],
['RGRAPH', 'KEEP_GRAPH'],
['RGRAPH', 'ADD_1x1'], ['RGRAPH', 'UPPER'], ['TRAIN', 'AUTO_MATCH'], ['OPTIM', 'MAX_EPOCH'], ['TRAIN', 'CHECKPOINT_PERIOD']]
vars_alias = ['trans', 'talkmode', 'num',
'message', 'sparsity', 'p', 'agg',
'graphseed', 'starttrainseed', 'endtrainseed', 'keep',
'add1x1', 'upper', 'match', 'epoch', 'chkpt'
]
## Note: (1) how many relational graphs used to run: graphs_n64_54, graphs_n64_441, graphs_n64_3854
## (2): "best_id" is to be discovered based on experimental results. Given best_id is for graph2nn experiments
## (3): Each ImageNet experiment provides with 1 seed. One can change SEED_TRAIN_START and SEED_TRAIN_END
## to get results for multiple seeds
### 5 layer 64 dim MLP, CIFAR-10
# usage: python yaml_gen.py --task mlp_cifar10
if args.task == 'mlp_cifar10':
# best_id = 3552 # best_id is for graph2nn experiments.
fname_bases = ['mlp_bs128_1gpu_layer3']
# graphs = np.load('analysis/graphs_n64_53.npy')
# To load the .npy file
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['talklinear_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 6, True,
0, True, True, 200, 200]]
vars_value += [['linear_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 6, True,
0, True, True, 200, 200]]
### CNN, CIFAR-10
# usage : python yaml_gen.py --task cnn_cifar10
if args.task == 'cnn_cifar10':
# best_id = 3552 # best_id is for graph2nn experiments.
fname_bases = ['cnn6_bs1024_8gpu_64d']
# graphs = np.load('analysis/graphs_n64_53.npy')
# To load the .npy file
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['convtalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 6, True,
0, True, True, 100, 100]]
vars_value += [['convbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 6, True,
0, True, True, 100, 100]]
### CNN, CIFAR-100
# usage: python yaml_gen.py --task cnn_cifar100
elif args.task == 'cnn_cifar100':
# best_id = 3552 # best_id is for graph2nn experiments.
fname_bases = ['cnn6_bs640_1gpu_64d']
# graphs = np.load('analysis/graphs_n64_53.npy')
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['convtalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 6, True,
0, True, True, 100, 100]]
vars_value += [['convbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 6, True,
0, True, True, 100, 100]]
### Res18, tinyimagenet
# usage: python yaml_gen.py --task resnet18_tinyimagenet
elif args.task == 'resnet18_tinyimagenet':
fname_bases = ['R-18_tiny_bs256_1gpu']
# graphs = np.load('analysis/graphs_n64_53.npy')
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['groupbasictalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 75, 25]]
vars_value += [['channelbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 75, 25]]
### CNN, imagenet
elif args.task == 'cnn_imagenet':
# best_id = 27 # best_id is for graph2nn experiments.
fname_bases = ['cnn6_bs32_1gpu_64d', 'cnn6_bs256_8gpu_64d']
# graphs = np.load('analysis/graphs_n64_53.npy')
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['convtalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 100]]
vars_value += [['convbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 100]]
### Res18, ImageNet
# usage : python yaml_gen.py --task resnet18_imagenet
elif args.task == 'resnet18_imagenet':
# best_id = 37 # best_id is for graph2nn experiments.
fname_bases = ['R-18_bs450_1gpu']
# graphs = np.load('analysis/graphs_n64_53.npy')
np_load_old = np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
graphs = np.load('analysis/graphs_n64_53.npy')
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['groupbasictalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 75, 25]]
vars_value += [['channelbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 75, 25]]
### Res34, ImageNet
# usage: python yaml_gen.py --task resnet34_imagenet
elif args.task == 'resnet34_imagenet':
# best_id = 37 # best_id is for graph2nn experiments.
fname_bases = ['R-34_bs32_1gpu', 'R-34_bs256_8gpu']
# graphs = np.load('analysis/graphs_n64_52.npy')
np_load_old = np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
graphs = np.load('analysis/graphs_n64_53.npy')
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['groupbasictalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 25]]
vars_value += [['channelbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 25]]
### Res34-sep, ImageNet
elif args.task == 'resnet34sep_imagenet':
# best_id = 36 # best_id is for graph2nn experiments.
fname_bases = ['R-34_bs32_1gpu', 'R-34_bs256_8gpu']
graphs = np.load('analysis/graphs_n64_53.npy')
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['groupseptalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 25]]
vars_value += [['channelsep_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 25]]
### Res50, ImageNet
elif args.task == 'resnet50_imagenet':
# best_id = 22 # best_id is for graph2nn experiments.
fname_bases = ['R-50_bs32_1gpu', 'R-50_bs256_8gpu']
graphs = np.load('analysis/graphs_n64_53.npy')
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['talkbottleneck_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 25]]
vars_value += [['bottleneck_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 25]]
### Efficient net, ImageNet
# usage: python yaml_gen.py --task efficient_imagenet
elif args.task == 'efficient_imagenet':
# best_id = 42 # best_id is for graph2nn experiments.
fname_bases = ['EN-B0_bs64_1gpu_nms', 'EN-B0_bs512_8gpu_nms']
# graphs = np.load('analysis/graphs_n64_53.npy'))
np_load_old = np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
graphs = np.load('analysis/graphs_n64_53.npy')
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['mbtalkconv_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 25]]
vars_value += [['mbconv_transform', 'dense', 16,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 25]]
# ### MLP, cifar10, bio
# elif args.task == 'mlp_cifar10_bio':
# fname_bases = ['mlp_bs128_1gpu_layer3']
# for graph_type in ['mcwholeraw']:
# vars_value += [['talklinear_transform', 'dense', 71,
# graph_type, 1.0, 0.0, 'sum',
# 1, 1, 6, True,
# 0, True, True, 200]]
# for graph_type in ['mcvisualraw']:
# vars_value += [['talklinear_transform', 'dense', 30,
# graph_type, 1.0, 0.0, 'sum',
# 1, 1, 6, True,
# 0, True, True, 200]]
# for graph_type in ['catraw']:
# vars_value += [['talklinear_transform', 'dense', 52,
# graph_type, 1.0, 0.0, 'sum',
# 1, 1, 6, True,
# 0, True, True, 200]]
# vars_value += [['linear_transform', 'dense', 64,
# 'ws', 1.0, 0.0, 'sum',
# 1, 1, 6, True,
# 0, True, True, 200]]
# Pick the config directory from the task name; check cifar100 before cifar10
# because 'cifar10' is a substring of 'cifar100'.
if 'cifar100' in args.task:
    dir_name = 'cifar100'
elif 'cifar10' in args.task:
    dir_name = 'cifar10'
elif 'tinyimagenet' in args.task:
    dir_name = 'tinyimagenet200'
else:
    dir_name = 'imagenet'
dir_in = 'configs/baselines/{}/'.format(dir_name)
dir_out = 'configs/baselines/{}/{}/'.format(dir_name, args.task)
dir_out_all = 'configs/baselines/{}/{}/all/'.format(dir_name, args.task)
dir_out_best = 'configs/baselines/{}/{}/best/'.format(dir_name, args.task)
# makedirs_rm_exist(dir_out)
# makedirs_rm_exist(dir_out_all)
# makedirs_rm_exist(dir_out_best)
# print(vars_value)
for fname_base in fname_bases:
if 'bio' not in args.task:
gen(dir_in, dir_out_all, fname_base, vars_label, vars_alias, vars_value)
# gen_single(dir_in, dir_out_best, fname_base, vars_label, vars_alias, vars_value[best_id], comment='best')
gen_single(dir_in, dir_out_best, fname_base, vars_label, vars_alias, vars_value[-1], comment='baseline')
else:
gen(dir_in, dir_out_best, fname_base, vars_label, vars_alias, vars_value)
| 16,638 | 38.058685 | 138 | py |
RobDanns | RobDanns-main/deep_learning/tools/corruptions-inference-tinyimagenet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
from torchvision.utils import save_image
from skimage.util import random_noise
print("Let's use GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
# TEST(VAL) DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
classes = []
filenames = []
with open(file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for x in range(0, len(lines)):
tokens = lines[x].split()
classes.append(tokens[1])
filenames.append(tokens[0])
return filenames, classes
def load_allimages(dir):
images = []
if not os.path.isdir(dir):
sys.exit(-1)
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
#if datasets.folder.is_image_file(fname):
if datasets.folder.has_file_allowed_extension(fname,['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
path = os.path.join(root, fname)
item = path
images.append(item)
return images
class TinyImageNet(torch.utils.data.Dataset):
""" TinyImageNet200 validation dataloader."""
def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
self.img_path = img_path
self.transform = transform
self.gt_path = gt_path
self.class_to_idx = class_to_idx
self.classidx = []
self.imgs, self.classnames = parseClasses(gt_path)
for classname in self.classnames:
self.classidx.append(self.class_to_idx[classname])
def __getitem__(self, index):
"""inputs: Index, retrns: tuple(im, label)"""
img = None
with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = self.classidx[index]
return img, label
def __len__(self):
return len(self.imgs)
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
eval_stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
# return eval_stats
def save_noisy_image(img, name):
    # Reshape to (N, 3, H, W) for the detected spatial size, then save once.
    if img.size(2) == 32:
        img = img.view(img.size(0), 3, 32, 32)
    elif img.size(2) == 64:
        img = img.view(img.size(0), 3, 64, 64)
    else:
        img = img.view(img.size(0), 3, 224, 224)
    save_image(img, name)
## Functions to save noisy images.
# def gaussian_noise(test_loader):
# print("Adding gaussian_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# gaussian_img_05 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.05, clip=True))
# gaussian_img_2 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.2, clip=True))
# gaussian_img_4 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.4, clip=True))
# gaussian_img_6 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.6, clip=True))
# save_noisy_image(gaussian_img_05, r"noisy-images/gaussian_05.png")
# save_noisy_image(gaussian_img_2, r"noisy-images/gaussian_2.png")
# save_noisy_image(gaussian_img_4, r"noisy-images/gaussian_4.png")
# save_noisy_image(gaussian_img_6, r"noisy-images/gaussian_6.png")
# break
# def salt_pepper_noise(test_loader):
# print("Adding salt_pepper_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# s_vs_p_5 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.5, clip=True))
# s_vs_p_6 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.6, clip=True))
# s_vs_p_7 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.7, clip=True))
# save_noisy_image(s_vs_p_5, r"noisy-images/s&p_5.png")
# break
# def speckle_noise(test_loader):
# print("Adding speckle_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# speckle_img_05 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.05, clip=True))
# speckle_img_2 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.2, clip=True))
# speckle_img_4 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.4, clip=True))
# speckle_img_6 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.6, clip=True))
# save_noisy_image(speckle_img_05, r"noisy-images/speckle_05.png")
# save_noisy_image(speckle_img_2, r"noisy-images/speckle_2.png")
# save_noisy_image(speckle_img_4, r"noisy-images/speckle_4.png")
# save_noisy_image(speckle_img_6, r"noisy-images/speckle_6.png")
# break
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
if cu.had_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
data_path = dp.get_data_path(cfg.TRAIN.DATASET) # Retrieve the data path for the dataset
traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# create training dataset and loader
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=True)
# create validation dataset
test_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
normalize]))
# create validation loader
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# Create meters
test_meter = TestMeter(len(test_loader))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
noise_mode = ['gaussian', 'speckle', 's&p']
noise_std = [0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6] # change the variance values as desired.
model.eval()
accuracies_gaussian = []
accuracies_saltpepper = []
accuracies_speckle = []
for mode in noise_mode:
for level in noise_std:
print("Adding noise={} at level={} to images".format(mode, level))
ctr = 0
correct = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader):
if not 's&p' in mode:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, mean=0, var=level, clip=True))
else:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, salt_vs_pepper=0.5, clip=True))
noisy_img, labels = noisy_img.cuda(), labels.cuda(non_blocking=True)
outputs = model(noisy_img.float())
_, predicted = torch.max(outputs.data, 1)
ctr += 1
total += labels.size(0)
correct += (predicted == labels).sum()
                    if total > X:  # replace X with the number of test images to evaluate per noise setting
                        break
acc = 100 * float(correct) / total
print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
if 'gaussian' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_gaussian.append(round(acc, 2))
print("Guassian Accuracies after append :", accuracies_gaussian)
elif 'speckle' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_speckle.append(round(acc, 2))
print("Speckle Accuracies after append :", accuracies_speckle)
elif 's&p' in mode:
print('Robust Accuracy = {:.3f} for S&P noise'.format(acc))
accuracies_saltpepper.append(round(acc, 2))
print("Salt&Pepper Accuracies after append :", accuracies_saltpepper)
break
else:
print("noise mode not supported")
# gaussian_noise(test_loader)
# salt_pepper_noise(test_loader)
# speckle_noise(test_loader)
    # Unpack one accuracy per noise level; the number of variables must match len(noise_std).
gaus_001, gaus_01, gaus_05, gaus_1, gaus_2, gaus_3, gaus_4, gaus_5, gaus_6 = (items for items in accuracies_gaussian)
speck_001, speck_01, speck_05, speck_1, speck_2, speck_3, speck_4, speck_5, speck_6 = (items for items in accuracies_speckle)
saltpepper = accuracies_saltpepper[0]
# load the top1 error and top5 error from the evaluation results
f = open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r")
c_ids = []
for i in f.readlines():
sub_id = list(map(float, i.split(",")))
c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_gaussian = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(gaus_001), str(gaus_01), str(gaus_05), str(gaus_1), str(gaus_2), str(gaus_3), str(gaus_4), str(gaus_5), str(gaus_6)])
result_speck = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(speck_001), str(speck_01), str(speck_05), str(speck_1), str(speck_2), str(speck_3), str(speck_4), str(speck_5), str(speck_6)])
result_sp = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(saltpepper)])
with open("{}/gaus_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Gaussian:{} ".format(accuracies_gaussian))
text_file.write(result_gaussian + '\n')
with open("{}/saltpepper_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Salt & Pepper:{} ".format(accuracies_saltpepper))
text_file.write(result_sp + '\n')
with open("{}/speckle_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Speckle:{} ".format(accuracies_speckle))
text_file.write(result_speck + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
## If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Launch inference + adversarial run
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
print('Inference seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 25,928 | 41.092532 | 139 | py |
RobDanns | RobDanns-main/deep_learning/tools/train_resnet18_on_tinyimagenet200.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
logger = lu.get_logger(__name__)
print("Let's use GPU :", torch.cuda.current_device())
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
# TEST/VAL DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
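    """Parse a val_annotations.txt-style file into parallel lists of image filenames and class names."""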
classes = []
filenames = []
with open(file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for x in range(0, len(lines)):
tokens = lines[x].split()
classes.append(tokens[1])
filenames.append(tokens[0])
return filenames, classes
def load_allimages(dir):
images = []
if not os.path.isdir(dir):
sys.exit(-1)
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
#if datasets.folder.is_image_file(fname):
if datasets.folder.has_file_allowed_extension(fname,['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
path = os.path.join(root, fname)
item = path
images.append(item)
return images
class TinyImageNet(torch.utils.data.Dataset):
""" TinyImageNet200 validation dataloader."""
def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
self.img_path = img_path
self.transform = transform
self.gt_path = gt_path
self.class_to_idx = class_to_idx
self.classidx = []
self.imgs, self.classnames = parseClasses(gt_path)
# logger.info('Number of images: {}'.format(len(self.imgs)))
# logger.info('Number of classes: {}'.format(len(self.classnames)))
for classname in self.classnames:
self.classidx.append(self.class_to_idx[classname])
def __getitem__(self, index):
"""inputs: Index, retrns: tuple(im, label)"""
img = None
with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = self.classidx[index]
return img, label
def __len__(self):
return len(self.imgs)
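# Minimal usage sketch (hypothetical paths), assuming the standard TinyImageNet-200 layout:
#   val_set = TinyImageNet('tiny-imagenet-200/val/images',
#                          'tiny-imagenet-200/val/val_annotations.txt',
#                          class_to_idx=train_set.class_to_idx,
#                          transform=transforms.ToTensor())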
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of nodes: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
def train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch, writer_train=None, params=0, flops=0, is_master=False):
"""Performs one epoch of training."""
# Shuffle the data
loader.shuffle(train_loader, cur_epoch)
# Update the learning rate
lr = optim.get_epoch_lr(cur_epoch)
optim.set_lr(optimizer, lr)
# Enable training mode
model.train()
train_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(train_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Perform the forward pass
preds = model(inputs)
# Compute the loss
loss = loss_fun(preds, labels)
# Perform the backward pass
optimizer.zero_grad()
loss.backward()
# Update the parameters
optimizer.step()
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the stats across the GPUs
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.scaled_all_reduce(
[loss, top1_err, top5_err]
)
# Copy the stats from GPU to CPU (sync point)
loss, top1_err, top5_err = loss.item(), top1_err.item(), top5_err.item()
train_meter.iter_toc()
# Update and log stats
train_meter.update_stats(
top1_err, top5_err, loss, lr, inputs.size(0) * cfg.NUM_GPUS
)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch, writer_train, params, flops, is_master=is_master)
trg_stats = train_meter.get_epoch_stats(cur_epoch)
train_meter.reset()
return trg_stats
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
eval_stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
return eval_stats
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
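        # Width search: when the relational-graph model misses the baseline budget,
        # round 1 rescales the first-stage width (multiplicatively, then in +/-1 steps)
        # until flops/params cross the baseline; round 2 nudges the later stage widths
        # one channel at a time to close the remaining gap.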
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
if cfg.TRAIN.AUTO_RESUME and cu.has_checkpoint():
last_checkpoint = cu.get_checkpoint_last()
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
# Create data loaders
# Retrieve the data path for the dataset
data_path = dp.get_data_path(cfg.TRAIN.DATASET)
traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# create training dataset and loader
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=True)
# create validation dataset
test_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
normalize]))
# create validation loader
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# Create meters
train_meter = TrainMeter(len(train_loader))
test_meter = TestMeter(len(test_loader))
    # Meter for FGSM/adversarial evaluation lives in the companion adversarial
    # inference scripts, which build test_loader_adv; it is not created here.
    # test_meter_fgsm = TestMeter(len(test_loader_adv))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
# do eval at initialization
initial_eval_stats = eval_epoch(test_loader, model, test_meter, -1,
writer_eval, params, flops, is_master=is_master)
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
last_epoch_eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
else:
for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
print('Epoch {} Started'.format(cur_epoch))
# Train for one epoch
trg_stats = train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch,
writer_train, is_master=is_master
)
# Compute precise BN stats
if cfg.BN.USE_PRECISE_STATS:
nu.compute_precise_bn_stats(model, train_loader)
# Save a checkpoint
if cu.is_checkpoint_epoch(cur_epoch):
checkpoint_file = cu.save_checkpoint(model, optimizer, cur_epoch)
logger.info('Wrote checkpoint to: {}'.format(checkpoint_file))
# Evaluate the model
if is_eval_epoch(cur_epoch):
eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
## If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Train the model
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
if cfg.NUM_GPUS > 1:
mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_train)
else:
single_proc_train()
else:
print('Seed {} exists, skip!'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 21,617 | 37.741935 | 129 | py |
RobDanns | RobDanns-main/deep_learning/tools/adversarial-inference-tinyimagenet200.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torchattacks
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
print("Let's use GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
# TEST/VAL DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
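    """Read a val_annotations.txt-style file and return (filenames, classnames) lists."""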
classes = []
filenames = []
with open(file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for x in range(0, len(lines)):
tokens = lines[x].split()
classes.append(tokens[1])
filenames.append(tokens[0])
return filenames, classes
def load_allimages(dir):
images = []
if not os.path.isdir(dir):
sys.exit(-1)
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
# if datasets.folder.is_image_file(fname):
if datasets.folder.has_file_allowed_extension(fname,['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
path = os.path.join(root, fname)
item = path
images.append(item)
return images
class TinyImageNet(torch.utils.data.Dataset):
""" TinyImageNet200 validation dataloader."""
def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
self.img_path = img_path
self.transform = transform
self.gt_path = gt_path
self.class_to_idx = class_to_idx
self.classidx = []
self.imgs, self.classnames = parseClasses(gt_path)
for classname in self.classnames:
self.classidx.append(self.class_to_idx[classname])
def __getitem__(self, index):
"""inputs: Index, retrns: tuple(im, label)"""
img = None
with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = self.classidx[index]
return img, label
def __len__(self):
return len(self.imgs)
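# Note: class_to_idx is copied from the training split so validation labels share
# the same index space the model was trained with.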
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of nodes: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
eval_stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
# return eval_stats
class Normalize(torch.nn.Module):
def __init__(self, mean, std):
super(Normalize, self).__init__()
self.register_buffer('mean', torch.Tensor(mean))
self.register_buffer('std', torch.Tensor(std))
def forward(self, input):
# Broadcasting
mean = self.mean.reshape(1,3,1,1)
std = self.std.reshape(1,3,1,1)
norm_img = (input - mean) / std
return norm_img
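# Prepending this layer to the classifier lets the attacks below craft perturbations
# directly in raw [0, 1] pixel space while the network still sees normalized inputs.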
# Helper class for printing model layers
class PrintLayer(torch.nn.Module):
def __init__(self):
super(PrintLayer, self).__init__()
def forward(self, x):
# Do your print / debug stuff here
print(x)
return x
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# for name, param in model.named_parameters():
# print(name, param.shape)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
if cu.had_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
data_path = dp.get_data_path(cfg.TRAIN.DATASET) # Retrieve the data path for the dataset
traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# create training dataset and loader
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=True)
# create validation dataset
test_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
normalize]))
# create validation loader
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# create adversarial dataset
adv_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor()]))
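    # The adversarial dataset deliberately omits Normalize: normalization happens
    # inside `net` below, so attacks perturb raw [0, 1] pixels.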
# create adversarial loader
test_loader_adv = torch.utils.data.DataLoader(
adv_dataset,
batch_size=1,
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# Create meters
test_meter = TestMeter(len(test_loader))
test_meter_adv = TestMeter(len(test_loader_adv))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
    # Epsilon encoding: 0 -> PGD, 1 -> CW, any other value -> FGSM with that epsilon.
    # The FGSM strengths below are illustrative placeholders; swap in the values you
    # need (the unpacking further down expects exactly seven FGSM entries).
    epsilons = [0, 1/255, 2/255, 4/255, 6/255, 8/255, 16/255, 32/255, 1]
    num_adv_images = 1000  # illustrative cap on adversarial images per attack; adjust as needed
    # Per-channel mean and SD values (RGB order, standard ImageNet statistics) used for TinyImageNet
tinyimagenet_MEAN = [0.485, 0.456, 0.406]
tinyimagenet_SD = [0.229, 0.224, 0.225]
accuracies = []
# add normalization layer to the model
norm_layer = Normalize(mean=tinyimagenet_MEAN, std=tinyimagenet_SD)
net = torch.nn.Sequential(norm_layer, model).cuda()
net = net.eval()
for epsilon in epsilons:
if epsilon == 0:
print("Running PGD Attack")
            atk = torchattacks.PGD(net, eps=1/510, alpha=2/255, steps=7) # for relevant dataset, use parameters from torchattacks official notebook
elif epsilon == 1:
print("Running CW Attack")
atk = torchattacks.CW(net, c=0.1, kappa=0, steps=100, lr=0.01) # choose suitable values for c, kappa, steps, and lr.
else:
print("Running FGSM Attacks on epsilon :", epsilon)
atk = torchattacks.FGSM(net, eps=epsilon)
ctr = 0
correct = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader_adv):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
adv_images = atk(inputs, labels)
outputs = net(adv_images)
_, predicted = torch.max(outputs.data, 1)
ctr += 1
total += 1
correct += (predicted == labels).sum()
            if ctr > num_adv_images:  # stop after the configured number of adversarial images
print(ctr, " images done for epsilon:", epsilon)
break
acc = 100 * float(correct) / total
print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
accuracies.append(round(acc, 2))
print('Attack Accuracy = {:.3f} with epsilon = {:.4f}'.format(acc, epsilon))
print("accuracies after apend :", accuracies)
    # Unpack the accuracies list into named floats; keep the variable count in sync with len(epsilons).
accPGD, accFGSM1, accFGSM2, accFGSM3, accFGSM4, accFGSM5, accFGSM6, accFGSM7, accCW = (items for items in accuracies)
# load the top1 error and top5 error from the evaluation results
f = open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r")
c_ids = []
for i in f.readlines():
sub_id = list(map(float, i.split(",")))
c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_info = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(accPGD), str(accFGSM1), str(accFGSM2), str(accFGSM3), str(accFGSM4), str(accFGSM5),
str(accFGSM6), str(accFGSM7), str(accCW)])
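    # Each stats.txt row: GROUP_NUM, P, SPARSITY, clean top-1/top-5 error, then one
    # accuracy per attack in the order the epsilons list was traversed.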
with open("{}/stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies {} ".format(accuracies))
text_file.write(result_info + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
## If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Launch inference + adversarial run
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
            print('Inference seed {} already exists, skipping'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 23,184 | 38.768439 | 147 | py |
RobDanns | RobDanns-main/deep_learning/tools/adversarial-inference.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
import argparse
import pickle
import numpy as np
import os
import sys
import torch
import math
import torchvision
import torchattacks
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.transforms as transforms
from datetime import datetime
from tensorboardX import SummaryWriter
import foolbox as fb
import art
import art.attacks.evasion as evasion
from art.estimators.classification import PyTorchClassifier
print("Using GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of nodes: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
# val_input_imgs,
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
class Normalize(torch.nn.Module):
def __init__(self, mean, std):
super(Normalize, self).__init__()
self.register_buffer('mean', torch.Tensor(mean))
self.register_buffer('std', torch.Tensor(std))
def forward(self, input):
# Broadcasting
mean = self.mean.reshape(1,3,1,1)
std = self.std.reshape(1,3,1,1)
norm_img = (input - mean) / std
return norm_img
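# Keeping normalization inside the network means attack budgets (eps, c) are
# expressed in un-normalized [0, 1] pixel units, matching the loaders used below.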
# Helper class for printing model layers
class PrintLayer(torch.nn.Module):
def __init__(self):
super(PrintLayer, self).__init__()
def forward(self, x):
# Do your print / debug stuff here
print(x)
return x
def train_model(writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
if cfg.MODEL.DEPTH == 20:
stats_baseline = 40813184 # ResNet20
elif cfg.MODEL.DEPTH == 26:
stats_baseline = 56140000 # ResNet26
elif cfg.MODEL.DEPTH == 34:
stats_baseline = 71480000 # ResNet34
elif cfg.MODEL.DEPTH == 38:
stats_baseline = 86819000 # ResNet38
elif cfg.MODEL.DEPTH == 50:
stats_baseline = 130000000 # ResNet50
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
elif cfg.TRAIN.DATASET == 'imagenet':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
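        # Two-round width search (same scheme as the training scripts): scale the
        # first-stage width until flops/params roughly match the baseline, then walk
        # the remaining stage widths channel by channel until the budget lines up.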
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
if cfg.IS_INFERENCE and cfg.IS_DDP:
model = torch.nn.parallel.DataParallel(model)
# for name, param in model.named_parameters():
# print(name, param.shape)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
if cu.had_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
test_loader = loader.construct_test_loader()
test_loader_adv = loader.construct_test_loader_adv()
# Create meters
test_meter = TestMeter(len(test_loader))
test_meter_adv = TestMeter(len(test_loader_adv))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
    # Epsilon encoding: 0 and 1 -> two PGD budgets, 2 and 3 -> two CW configurations,
    # any other value -> FGSM with that epsilon. The FGSM strengths below are
    # illustrative placeholders; swap in the values you need (the unpacking further
    # down expects exactly five FGSM entries).
    epsilons = [0, 1, 1/255, 2/255, 4/255, 8/255, 16/255, 2, 3]
    num_adv_images = 1000  # illustrative cap on adversarial images per attack; adjust as needed
    # Per-channel mean and SD values per dataset (note the ImageNet values below are in BGR order)
cifar10_MEAN = [0.491, 0.482, 0.4465]
cifar10_SD = [0.247, 0.243, 0.262]
cifar100_MEAN = [0.507, 0.487, 0.441]
cifar100_SD = [0.267, 0.256, 0.276]
imagenet_MEAN = [0.406, 0.456, 0.485]
imagenet_SD = [0.225, 0.224, 0.229]
accuracies = []
# replace the MEAN and SD variable in the following line for the relevant dataset.
norm_layer = Normalize(mean=cifar10_MEAN, std=cifar10_SD)
net = torch.nn.Sequential(norm_layer, model).cuda()
# net = torch.nn.Sequential(norm_layer, PrintLayer(), model).cuda()
net = net.eval()
print("Adversarial Loader Batch Size =", test_loader_adv.batch_size)
for epsilon in epsilons:
if epsilon == 0:
print("Running PGD Attack")
atk_ta = torchattacks.PGD(net, eps=6/255, alpha=2/255, steps=7) # for relevant dataset, use parameters from torchattacks official notebook
elif epsilon == 1:
print("Running PGD Attack")
atk_ta = torchattacks.PGD(net, eps=9/255, alpha=2/255, steps=7) # for relevant dataset, use parameters from torchattacks official notebook
elif epsilon == 2:
print("Running Torchattacks.CW")
atk_ta = torchattacks.CW(net, c=0.15, kappa=0, steps=100, lr=0.01) # replace the values of c and steps according to hyperparameters reported in the paper.
elif epsilon == 3:
print("Running Torchattacks.CW")
atk_ta = torchattacks.CW(net, c=0.25, kappa=0, steps=100, lr=0.01) # replace the values of c and steps according to hyperparameters reported in the paper.
# For Foolbox or ART attacks, uncomment the following lines.
# print("-> FoolBox.CW")
# fmodel = fb.PyTorchModel(net, bounds=(0, 1))
# atk_fb = fb.attacks.L2CarliniWagnerAttack(binary_search_steps=1, initial_const=0.05,
# confidence=0, steps=100, stepsize=0.01)
# print("-> Adversarial Robustness Toolbox.CW")
# classifier = PyTorchClassifier(model=net, clip_values=(0, 1),
# loss=loss_fun,
# optimizer=optimizer,
# input_shape=(3, 32, 32), nb_classes=10)
# atk_art = evasion.CarliniL2Method(batch_size=1, classifier=classifier,
# binary_search_steps=1, initial_const=0.05,
# confidence=0, max_iter=100,
# learning_rate=0.01)
else:
print("Running FGSM Attacks on epsilon :", epsilon)
atk_ta = torchattacks.FGSM(net, eps=epsilon)
ctr = 0
correct_ta = 0
# correct_fb = 0
# correct_art = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader_adv):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
inputs = inputs.float().div(255)
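            # The adversarial loader yields uint8 tensors; scaling to [0, 1] here
            # matches the bounds assumed by torchattacks and the Normalize layer.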
adv_images_ta = atk_ta(inputs, labels)
# _, adv_images_fb, _ = atk_fb(fmodel, inputs, labels, epsilons=1)
# adv_images_art = torch.tensor(atk_art.generate(inputsnp, labelsnp)).cuda()
adv_inputs_ta = adv_images_ta.float()
# adv_inputs_fb = adv_images_fb.float()
# adv_inputs_art = adv_images_art.float()
outputs_ta = net(adv_inputs_ta)
# outputs_fb = net(adv_inputs_fb)
# outputs_art = net(adv_inputs_art)
_, predicted_ta = torch.max(outputs_ta.data, 1)
# _, predicted_fb = torch.max(outputs_fb.data, 1)
# _, predicted_art = torch.max(outputs_art.data, 1)
ctr += 1
total += 1
correct_ta += (predicted_ta == labels).sum()
# correct_fb += (predicted_fb == labels).sum()
# correct_art += (predicted_art == labels).sum()
            if ctr > num_adv_images:  # stop after the configured number of adversarial images
print(ctr, " images done for epsilon:", epsilon)
break
acc_ta = 100 * float(correct_ta) / total
# acc_fb = 100 * float(correct_fb) / total
# acc_art = 100 * float(correct_art) / total
print("ta acc =", round(acc_ta, 2), ", ta correct =", float(correct_ta), ", total =", total)
# print("fb acc =", round(acc_fb, 2), ", fb correct =", float(correct_fb), ", total =", total)
# print("art acc =", round(acc_art, 2), ", art correct =", float(correct_art), ", total =", total)
accuracies.append(round(acc_ta, 2))
print('Attack Accuracy = {:.3f} with epsilon = {:.2f}'.format(acc_ta, epsilon))
print("accuracies after apend :", accuracies)
    # Unpack the accuracies list into named floats; the number of names must match len(epsilons).
accPGD_6by255, accPGD_9by255, accFGSM1, accFGSM2, accFGSM3, accFGSM4, accFGSM5, accCW_15, accCW_25 = (items for items in accuracies)
# load the top1 error and top5 error from the evaluation results
f = open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r")
c_ids = []
for i in f.readlines():
sub_id = list(map(float, i.split(",")))
c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_info = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(accPGD_6by255), str(accPGD_9by255), str(accFGSM1), str(accFGSM2), str(accFGSM3), str(accFGSM4), str(accFGSM5),
str(accCW_15), str(accCW_25)])
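    # One stats.txt row per seed: graph descriptors, clean top-1/top-5 error, and
    # the adversarial accuracies in epsilon-list order.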
#
with open("{}/stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies {} ".format(accuracies))
text_file.write(result_info + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
# If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Launch inference + adversarial run
train_model(writer_eval, is_master=du.is_master_proc())
if writer_eval is not None:
# writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
            print('Seed {} already run, skipping inference'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 23,798 | 41.72711 | 166 | py |
RobDanns | RobDanns-main/deep_learning/tools/corruptions-inference.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
import argparse
import pickle
import numpy as np
import os
import sys
import torch
import math
import torchvision
import torchattacks
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.transforms as transforms
from datetime import datetime
from tensorboardX import SummaryWriter
from torchvision.utils import save_image
from skimage.util import random_noise
print("Using GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of nodes: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
# val_input_imgs,
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
def save_noisy_image(img, name):
if img.size(2) == 32:
img = img.view(img.size(0), 3, 32, 32)
save_image(img, name)
else:
img = img.view(img.size(0), 3, 224, 224)
save_image(img, name)
## Functions to save noisy images.
# def gaussian_noise(test_loader):
# print("Adding gaussian_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# gaussian_img_05 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.05, clip=True))
# gaussian_img_2 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.2, clip=True))
# gaussian_img_4 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.4, clip=True))
# gaussian_img_6 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.6, clip=True))
# save_noisy_image(gaussian_img_05, r"noisy-images/gaussian_05.png")
# save_noisy_image(gaussian_img_2, r"noisy-images/gaussian_2.png")
# save_noisy_image(gaussian_img_4, r"noisy-images/gaussian_4.png")
# save_noisy_image(gaussian_img_6, r"noisy-images/gaussian_6.png")
# break
# def salt_pepper_noise(test_loader):
# print("Adding salt_pepper_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# s_vs_p_5 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.5, clip=True))
# s_vs_p_6 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.6, clip=True))
# s_vs_p_7 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.7, clip=True))
# save_noisy_image(s_vs_p_5, r"noisy-images/s&p_5.png")
# save_noisy_image(s_vs_p_6, r"noisy-images/s&p_6.png")
# save_noisy_image(s_vs_p_7, r"noisy-images/s&p_7.png")
# break
# def speckle_noise(test_loader):
# print("Adding speckle_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# speckle_img_05 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.05, clip=True))
# speckle_img_2 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.2, clip=True))
# speckle_img_4 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.4, clip=True))
# speckle_img_6 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.6, clip=True))
# save_noisy_image(speckle_img_05, r"noisy-images/speckle_05.png")
# save_noisy_image(speckle_img_2, r"noisy-images/speckle_2.png")
# save_noisy_image(speckle_img_4, r"noisy-images/speckle_4.png")
# save_noisy_image(speckle_img_6, r"noisy-images/speckle_6.png")
# break
def train_model(writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
            elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS excludes the stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
if cfg.MODEL.DEPTH == 20:
stats_baseline = 40813184 # ResNet20
elif cfg.MODEL.DEPTH == 26:
stats_baseline = 56140000 # ResNet26
elif cfg.MODEL.DEPTH == 34:
stats_baseline = 71480000 # ResNet34
elif cfg.MODEL.DEPTH == 38:
stats_baseline = 86819000 # ResNet38
elif cfg.MODEL.DEPTH == 50:
stats_baseline = 130000000 # ResNet50
            elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS excludes the stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
elif cfg.TRAIN.DATASET == 'imagenet':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
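        # Auto-match procedure: round 1 rescales the first-stage width by
        # sqrt(baseline/stats) and then steps it one channel at a time until
        # the budget line is crossed; round 2 nudges the remaining stage
        # widths one channel each until the flops/params are as close as
        # possible to the baseline (RGRAPH.UPPER picks which side to land on).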
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
if cfg.IS_INFERENCE and cfg.IS_DDP:
model = torch.nn.parallel.DataParallel(model)
# for name, param in model.named_parameters():
# print(name, param.shape)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
if cu.had_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
test_loader = loader.construct_test_loader()
# Create meters
test_meter = TestMeter(len(test_loader))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
noise_mode = ['gaussian', 'speckle', 's&p']
noise_var = [0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6] # change the variance values as desired.
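        # NOTE: the 's&p' mode ignores the variance levels above; it uses a
        # fixed salt_vs_pepper=0.5 ratio, so its level loop breaks after one pass.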
model.eval()
accuracies_gaussian = []
accuracies_saltpepper = []
accuracies_speckle = []
for mode in noise_mode:
for level in noise_var:
print("Adding noise={} at level={} to images".format(mode, level))
ctr = 0
correct = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader):
                if 's&p' not in mode:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, mean=0, var=level, clip=True))
else:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, salt_vs_pepper=0.5, clip=True))
noisy_img, labels = noisy_img.cuda(), labels.cuda(non_blocking=True)
outputs = model(noisy_img.float())
_, predicted = torch.max(outputs.data, 1)
ctr += 1
total += labels.size(0)
correct += (predicted == labels).sum()
                # Evaluation budget (assumed placeholder value): set to the
                # desired number of images to evaluate per noise level.
                max_eval_images = 10000
                if total > max_eval_images:
                    break
acc = 100 * float(correct) / total
print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
if 'gaussian' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_gaussian.append(round(acc, 2))
print("Guassian Accuracies after append :", accuracies_gaussian)
elif 'speckle' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_speckle.append(round(acc, 2))
print("Speckle Accuracies after append :", accuracies_speckle)
elif 's&p' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_saltpepper.append(round(acc, 2))
print("Salt&Pepper Accuracies after append :", accuracies_saltpepper)
break
else:
print("noise mode not supported")
# gaussian_noise(test_loader)
# salt_pepper_noise(test_loader)
# speckle_noise(test_loader)
        # Adjust the number of variables below to match the number of noise levels evaluated.
        gaus_001, gaus_01, gaus_05, gaus_1, gaus_2, gaus_3, gaus_4, gaus_5, gaus_6 = accuracies_gaussian
        speck_001, speck_01, speck_05, speck_1, speck_2, speck_3, speck_4, speck_5, speck_6 = accuracies_speckle
saltpepper = accuracies_saltpepper[0]
# load the top1 error and top5 error from the evaluation results
    c_ids = []
    with open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r") as f:
        for line in f.readlines():
            sub_id = list(map(float, line.split(",")))
            c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_gaussian = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(gaus_001), str(gaus_01), str(gaus_05), str(gaus_1), str(gaus_2), str(gaus_3), str(gaus_4), str(gaus_5), str(gaus_6)])
result_speck = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(speck_001), str(speck_01), str(speck_05), str(speck_1), str(speck_2), str(speck_3), str(speck_4), str(speck_5), str(speck_6)])
result_sp = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(saltpepper)])
with open("{}/gaus_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Gaussian:{} ".format(accuracies_gaussian))
text_file.write(result_gaussian + '\n')
with open("{}/saltpepper_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Salt & Pepper:{} ".format(accuracies_saltpepper))
text_file.write(result_sp + '\n')
with open("{}/speckle_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Speckle:{} ".format(accuracies_speckle))
text_file.write(result_speck + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
# If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Launch inference + adversarial run
train_model(writer_eval, is_master=du.is_master_proc())
if writer_eval is not None:
# writer_train.close()
writer_eval.close()
def check_seed_exists(i):
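    """Returns True if the i-th seed already has a results line written
    (one line is appended to the results file per completed seed)."""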
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
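    # NOTE: the second loop target below is an attribute reference, so each
    # iteration assigns the next seed directly to cfg.RGRAPH.SEED_TRAIN,
    # while i indexes lines in the results file for check_seed_exists.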
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
            print('Inference seed {} already exists, skipping inference'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 23,864 | 42.708791 | 139 | py |
RobDanns | RobDanns-main/deep_learning/tools/train_net.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
import argparse
import pickle
import numpy as np
import os
import sys
import torch
import math
# import torchvision
# import time
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
from datetime import datetime
from tensorboardX import SummaryWriter
# import wandb
logger = lu.get_logger(__name__)
print("Let's use GPU :", torch.cuda.current_device())
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of nodes: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
def train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch, writer_train=None, params=0, flops=0,
is_master=False):
"""Performs one epoch of training."""
# Shuffle the data
loader.shuffle(train_loader, cur_epoch)
# Update the learning rate
lr = optim.get_epoch_lr(cur_epoch)
optim.set_lr(optimizer, lr)
# Enable training mode
model.train()
train_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(train_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Perform the forward pass
preds = model(inputs)
# Compute the loss
loss = loss_fun(preds, labels)
# Perform the backward pass
optimizer.zero_grad()
loss.backward()
# Update the parameters
optimizer.step()
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the stats across the GPUs
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.scaled_all_reduce(
[loss, top1_err, top5_err]
)
# Copy the stats from GPU to CPU (sync point)
loss, top1_err, top5_err = loss.item(), top1_err.item(), top5_err.item()
train_meter.iter_toc()
# Update and log stats
train_meter.update_stats(
top1_err, top5_err, loss, lr, inputs.size(0) * cfg.NUM_GPUS
)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch, writer_train, params, flops, is_master=is_master)
trg_stats = train_meter.get_epoch_stats(cur_epoch)
train_meter.reset()
return trg_stats
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
return stats
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
stats_baseline = 40813184 # ResNet20
            elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS excludes the stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
if cfg.MODEL.DEPTH == 20:
stats_baseline = 40813184 # ResNet20
elif cfg.MODEL.DEPTH == 26:
stats_baseline = 56140000 # ResNet26
elif cfg.MODEL.DEPTH == 34:
stats_baseline = 71480000 # ResNet34
elif cfg.MODEL.DEPTH == 38:
stats_baseline = 86819000 # ResNet38
elif cfg.MODEL.DEPTH == 50:
stats_baseline = 130000000 # ResNet50
            elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS excludes the stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'imagenet':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
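        # Auto-match procedure: round 1 rescales the first-stage width by
        # sqrt(baseline/stats) and then steps it one channel at a time until
        # the budget line is crossed; round 2 nudges the remaining stage
        # widths one channel each until the flops/params are as close as
        # possible to the baseline (RGRAPH.UPPER picks which side to land on).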
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# wandb.watch(model)
# Load a checkpoint if applicable
start_epoch = 0
if cfg.TRAIN.AUTO_RESUME and cu.has_checkpoint():
last_checkpoint = cu.get_checkpoint_last1()
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
# Create data loaders
train_loader = loader.construct_train_loader()
test_loader = loader.construct_test_loader()
# Create meters
train_meter = TrainMeter(len(train_loader))
test_meter = TestMeter(len(test_loader))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
# do eval at initialization
initial_eval_stats = eval_epoch(test_loader, model, test_meter, -1,
writer_eval, params, flops, is_master=is_master)
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
last_epoch_eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
else:
for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
print('Epoch {} Started'.format(cur_epoch))
# Train for one epoch
trg_stats = train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch,
writer_train, is_master=is_master
)
# Compute precise BN stats
if cfg.BN.USE_PRECISE_STATS:
nu.compute_precise_bn_stats(model, train_loader)
# Save a checkpoint
if cu.is_checkpoint_epoch(cur_epoch):
checkpoint_file = cu.save_checkpoint(model, optimizer, cur_epoch)
logger.info('Wrote checkpoint to: {}'.format(checkpoint_file))
# Evaluate the model
if is_eval_epoch(cur_epoch):
eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
# wandb.log({'Epoch': cur_epoch, 'Train top1_err': trg_stats['top1_err'], 'Test top1_err': eval_stats['top1_err']})
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
## If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Train the model
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
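    """Returns True if the i-th seed already has a results line written
    (one line is appended to the results file per completed seed)."""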
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# wandb.init(project = 'Rob_G2NN', entity='rowanai-graph-robustness')
# Parse cmd line args
args = parse_args()
# wandb.config.update(args)
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
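    # NOTE: the second loop target below is an attribute reference, so each
    # iteration assigns the next seed directly to cfg.RGRAPH.SEED_TRAIN,
    # while i indexes lines in the results file for check_seed_exists.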
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
if cfg.NUM_GPUS > 1:
mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_train)
else:
single_proc_train()
else:
print('Seed {} exists, skip!'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 18,692 | 39.113734 | 127 | py |
RobDanns | RobDanns-main/deep_learning/pycls/config.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Configuration file."""
import os
from yacs.config import CfgNode as CN
# Global config object
_C = CN()
# Example usage:
# from core.config import cfg
cfg = _C
# ---------------------------------------------------------------------------- #
# Model options
# ---------------------------------------------------------------------------- #
_C.MODEL = CN()
# Model type to use
_C.MODEL.TYPE = ''
# Number of weight layers
_C.MODEL.DEPTH = 0
# Number of classes
_C.MODEL.NUM_CLASSES = 10
# Loss function (see pycls/models/loss.py for options)
_C.MODEL.LOSS_FUN = 'cross_entropy'
# Num layers, excluding the stem and head layers (the total layer count is this value + 2)
_C.MODEL.LAYERS = 3
# ---------------------------------------------------------------------------- #
# ResNet options
# ---------------------------------------------------------------------------- #
_C.RESNET = CN()
# Transformation function (see pycls/models/resnet.py for options)
_C.RESNET.TRANS_FUN = 'basic_transform'
# Number of groups to use (1 -> ResNet; > 1 -> ResNeXt)
_C.RESNET.NUM_GROUPS = 1
# Width of each group (64 -> ResNet; 4 -> ResNeXt)
_C.RESNET.WIDTH_PER_GROUP = 64
# Apply stride to 1x1 conv (True -> MSRA; False -> fb.torch)
_C.RESNET.STRIDE_1X1 = False
# Whether append 1x1 resblock
_C.RESNET.APPEND1x1 = 0
# For group conv only
_C.RESNET.GROUP_SIZE = 2
# ---------------------------------------------------------------------------- #
# EfficientNet options
# ---------------------------------------------------------------------------- #
_C.EFFICIENT_NET = CN()
# Stem width
_C.EFFICIENT_NET.STEM_W = 32
# Depth for each stage (number of blocks in the stage)
_C.EFFICIENT_NET.DEPTHS = []
# Width for each stage (width of each block in the stage)
_C.EFFICIENT_NET.WIDTHS = []
# Expansion ratios for MBConv blocks in each stage
_C.EFFICIENT_NET.EXP_RATIOS = []
# Squeeze-and-Excitation (SE) operation
_C.EFFICIENT_NET.SE_ENABLED = True
# Squeeze-and-Excitation (SE) ratio
_C.EFFICIENT_NET.SE_RATIO = 0.25
# Linear projection
_C.EFFICIENT_NET.LIN_PROJ = True
# Strides for each stage (applies to the first block of each stage)
_C.EFFICIENT_NET.STRIDES = []
# Kernel sizes for each stage
_C.EFFICIENT_NET.KERNELS = []
# Head type ('conv_head' or 'simple_head')
_C.EFFICIENT_NET.HEAD_TYPE = 'conv_head'
# Head width (applies to 'conv_head')
_C.EFFICIENT_NET.HEAD_W = 1280
# Ativation function
_C.EFFICIENT_NET.ACT_FUN = 'swish'
# Drop connect ratio
_C.EFFICIENT_NET.DC_RATIO = 0.0
# Drop connect implementation
_C.EFFICIENT_NET.DC_IMP = 'tf'
# Dropout ratio
_C.EFFICIENT_NET.DROPOUT_RATIO = 0.0
# ---------------------------------------------------------------------------- #
# Relational graph options
# ---------------------------------------------------------------------------- #
_C.RGRAPH = CN()
# dim for first layer. NOTE: this is fixed when matching FLOPs
_C.RGRAPH.DIM_FIRST = 16
# dim for each stage
_C.RGRAPH.DIM_LIST = []
# wide stem module
_C.RGRAPH.STEM_MODE = 'default'
# Message exchange mode: dense, hier (deprecated)
_C.RGRAPH.TALK_MODE = 'dense'
# Num of nodes
_C.RGRAPH.GROUP_NUM = 32
# Size of nodes in Stage 1
_C.RGRAPH.GROUP_SIZE = 1
# The type of message passing used
_C.RGRAPH.MESSAGE_TYPE = 'ws'
# Whether use directed graph
_C.RGRAPH.DIRECTED = False
# Graph sparsity
_C.RGRAPH.SPARSITY = 0.5
# Graph Randomness
_C.RGRAPH.P = 0.0
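# e.g. with MESSAGE_TYPE='ws', SPARSITY=0.5 and P=0.0 yield a deterministic
# half-density graph; raising P rewires edges at random (Watts-Strogatz
# style), with SEED_GRAPH fixing the draw.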
# Graph seed
_C.RGRAPH.SEED_GRAPH = 1
# training seed used
_C.RGRAPH.SEED_TRAIN = 1
# training seed, start, end
_C.RGRAPH.SEED_TRAIN_START = 1
_C.RGRAPH.SEED_TRAIN_END = 2
# Keep graph across the network
_C.RGRAPH.KEEP_GRAPH = True
# Append additional 1x1 layers for extra talks
_C.RGRAPH.ADD_1x1 = 0
# Match upper computational bound
_C.RGRAPH.UPPER = True
# Auto match computational budget
_C.RGRAPH.AUTO_MATCH = True
# AGG func. Only sum is supported in current mask-based implementation
_C.RGRAPH.AGG_FUNC = 'sum'
# Save weight matrices as graphs. Warning: the saved matrices can be huge
_C.RGRAPH.SAVE_GRAPH = False
# ---------------------------------------------------------------------------- #
# Batch norm options
# ---------------------------------------------------------------------------- #
_C.BN = CN()
# BN epsilon
_C.BN.EPS = 1e-5
# BN momentum (BN momentum in PyTorch = 1 - BN momentum in Caffe2)
_C.BN.MOM = 0.1
# Precise BN stats
_C.BN.USE_PRECISE_STATS = True
_C.BN.NUM_SAMPLES_PRECISE = 1024
# Initialize the gamma of the final BN of each block to zero
_C.BN.ZERO_INIT_FINAL_GAMMA = False
# ---------------------------------------------------------------------------- #
# Optimizer options
# ---------------------------------------------------------------------------- #
_C.OPTIM = CN()
# Base learning rate
_C.OPTIM.BASE_LR = 0.1
# Learning rate policy: select from {'cos', 'exp', 'steps'}
_C.OPTIM.LR_POLICY = 'cos'
# Exponential decay factor
_C.OPTIM.GAMMA = 0.1
# Step size for 'exp' and 'cos' policies (in epochs)
_C.OPTIM.STEP_SIZE = 1
# Steps for 'steps' policy (in epochs)
_C.OPTIM.STEPS = []
# Learning rate multiplier for 'steps' policy
_C.OPTIM.LR_MULT = 0.1
# Maximal number of epochs
_C.OPTIM.MAX_EPOCH = 200
# Momentum
_C.OPTIM.MOMENTUM = 0.9
# Momentum dampening
_C.OPTIM.DAMPENING = 0.0
# Nesterov momentum
_C.OPTIM.NESTEROV = True
# L2 regularization
_C.OPTIM.WEIGHT_DECAY = 5e-4
# Start the warm up from OPTIM.BASE_LR * OPTIM.WARMUP_FACTOR
_C.OPTIM.WARMUP_FACTOR = 0.1
# Gradually warm up the OPTIM.BASE_LR over this number of epochs
_C.OPTIM.WARMUP_EPOCHS = 0
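# e.g. BASE_LR=0.1, WARMUP_FACTOR=0.1, WARMUP_EPOCHS=5 starts at lr=0.01 and
# ramps to 0.1 over the first 5 epochs.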
# ---------------------------------------------------------------------------- #
# Training options
# ---------------------------------------------------------------------------- #
_C.TRAIN = CN()
# Dataset and split
_C.TRAIN.DATASET = ''
_C.TRAIN.SPLIT = 'train'
# Total mini-batch size
_C.TRAIN.BATCH_SIZE = 128
# Evaluate model on test data every eval period epochs
_C.TRAIN.EVAL_PERIOD = 1
# Save model checkpoint every checkpoint period epochs
_C.TRAIN.CHECKPOINT_PERIOD = 50
# Resume training from the latest checkpoint in the output directory
_C.TRAIN.AUTO_RESUME = True
# Checkpoint to start training from (if no automatic checkpoint saved)
_C.TRAIN.START_CHECKPOINT = ''
_C.TRAIN.AUTO_MATCH = False
# ---------------------------------------------------------------------------- #
# Testing options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
# Dataset and split
_C.TEST.DATASET = ''
_C.TEST.SPLIT = 'val'
# Total mini-batch size
_C.TEST.BATCH_SIZE = 200
# ---------------------------------------------------------------------------- #
# Common train/test data loader options
# ---------------------------------------------------------------------------- #
_C.DATA_LOADER = CN()
# Number of data loader workers per training process
_C.DATA_LOADER.NUM_WORKERS = 4
# Load data to pinned host memory
_C.DATA_LOADER.PIN_MEMORY = True
# ---------------------------------------------------------------------------- #
# Memory options
# ---------------------------------------------------------------------------- #
_C.MEM = CN()
# Perform ReLU inplace
_C.MEM.RELU_INPLACE = True
# ---------------------------------------------------------------------------- #
# CUDNN options
# ---------------------------------------------------------------------------- #
_C.CUDNN = CN()
# Perform benchmarking to select the fastest CUDNN algorithms to use
# Note that this may increase the memory usage and will likely not result
# in overall speedups when variable size inputs are used (e.g. COCO training)
_C.CUDNN.BENCHMARK = False
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Number of GPUs to use (applies to both training and testing)
_C.NUM_GPUS = 1
# Output directory
_C.OUT_DIR = '/tmp'
# Checkpoint directory for inference
_C.CHECKPT_DIR = '/tmp'
_C.IS_INFERENCE = False
_C.IS_DDP = False
# Config destination (in OUT_DIR)
_C.CFG_DEST = 'config.yaml'
# Note that non-determinism may still be present due to non-deterministic
# operator implementations in GPU operator libraries
_C.RNG_SEED = 1
# Log destination ('stdout' or 'file')
_C.LOG_DEST = 'file'
# Log period in iters
_C.LOG_PERIOD = 10
# Distributed backend
_C.DIST_BACKEND = 'nccl'
# Hostname and port for initializing multi-process groups
_C.HOST = 'localhost'
_C.PORT = 12002
# Computing flops by online forward pass
_C.ONLINE_FLOPS = False
# Whether use Tensorboard
_C.TENSORBOARD = False
def assert_cfg():
"""Checks config values invariants."""
assert not _C.OPTIM.STEPS or _C.OPTIM.STEPS[0] == 0, \
'The first lr step must start at 0'
assert _C.TRAIN.SPLIT in ['train', 'val', 'test'], \
'Train split \'{}\' not supported'.format(_C.TRAIN.SPLIT)
assert _C.TRAIN.BATCH_SIZE % _C.NUM_GPUS == 0, \
'Train mini-batch size should be a multiple of NUM_GPUS.'
assert _C.TEST.SPLIT in ['train', 'val', 'test'], \
'Test split \'{}\' not supported'.format(_C.TEST.SPLIT)
assert _C.TEST.BATCH_SIZE % _C.NUM_GPUS == 0, \
'Test mini-batch size should be a multiple of NUM_GPUS.'
# assert not _C.BN.USE_PRECISE_STATS or _C.NUM_GPUS == 1, \
# 'Precise BN stats computation not verified for > 1 GPU'
assert _C.LOG_DEST in ['stdout', 'file'], \
'Log destination \'{}\' not supported'.format(_C.LOG_DEST)
def dump_cfg():
"""Dumps the config to the output directory."""
cfg_file = os.path.join(_C.OUT_DIR, _C.CFG_DEST)
with open(cfg_file, 'w') as f:
_C.dump(stream=f)
def load_cfg(out_dir, cfg_dest='config.yaml'):
"""Loads config from specified output directory."""
cfg_file = os.path.join(out_dir, cfg_dest)
_C.merge_from_file(cfg_file)
| 10,201 | 24.378109 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/losses.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Loss functions."""
import torch.nn as nn
from pycls.config import cfg
# Supported losses
_LOSS_FUNS = {
'cross_entropy': nn.CrossEntropyLoss,
}
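# New losses can be registered here, e.g. (hypothetical):
# _LOSS_FUNS['my_loss'] = MyLossModule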
def get_loss_fun():
"""Retrieves the loss function."""
assert cfg.MODEL.LOSS_FUN in _LOSS_FUNS.keys(), \
        'Loss function \'{}\' not supported'.format(cfg.MODEL.LOSS_FUN)
return _LOSS_FUNS[cfg.MODEL.LOSS_FUN]().cuda()
| 730 | 26.074074 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/efficientnet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""EfficientNet models."""
import math
import torch
import torch.nn as nn
from pycls.config import cfg
import pycls.utils.net as nu
import pycls.utils.logging as logging
from .relation_graph import *
logger = logging.get_logger(__name__)
def get_conv(name):
"""Retrieves the transformation function by name."""
trans_funs = {
'mbconv_transform': MBConv,
'mbtalkconv_transform': MBTalkConv,
}
assert name in trans_funs.keys(), \
'Transformation function \'{}\' not supported'.format(name)
return trans_funs[name]
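# 'mbconv_transform' uses dense 1x1 convolutions, while 'mbtalkconv_transform'
# replaces them with relational-graph TalkConv2d layers (imported from
# relation_graph).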
def drop_connect_tf(x, drop_ratio):
"""Drop connect (tensorflow port)."""
keep_ratio = 1.0 - drop_ratio
rt = torch.rand([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
rt.add_(keep_ratio)
bt = torch.floor(rt)
x.div_(keep_ratio)
x.mul_(bt)
return x
def drop_connect_pt(x, drop_ratio):
"""Drop connect (pytorch version)."""
keep_ratio = 1.0 - drop_ratio
mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
mask.bernoulli_(keep_ratio)
x.div_(keep_ratio)
x.mul_(mask)
return x
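# Minimal usage sketch (illustrative values, not part of the API):
# x = torch.rand(8, 16, 32, 32)
# x = drop_connect_pt(x, drop_ratio=0.2) # zeroes ~20% of the samples in-place
# Both variants divide the survivors by keep_ratio so E[x] is unchanged.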
def get_act_fun(act_type):
"""Retrieves the activations function."""
act_funs = {
'swish': Swish,
'relu': nn.ReLU,
}
assert act_type in act_funs.keys(), \
'Activation function \'{}\' not supported'.format(act_type)
return act_funs[act_type]
class SimpleHead(nn.Module):
"""Simple head."""
def __init__(self, dim_in, num_classes):
super(SimpleHead, self).__init__()
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# Dropout
if cfg.EFFICIENT_NET.DROPOUT_RATIO > 0.0:
self.dropout = nn.Dropout(p=cfg.EFFICIENT_NET.DROPOUT_RATIO)
# FC
self.fc = nn.Linear(dim_in, num_classes, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x) if hasattr(self, 'dropout') else x
x = self.fc(x)
return x
class ConvHead(nn.Module):
"""EfficientNet conv head."""
def __init__(self, in_w, out_w, num_classes, act_fun):
super(ConvHead, self).__init__()
self._construct_class(in_w, out_w, num_classes, act_fun)
def _construct_class(self, in_w, out_w, num_classes, act_fun):
# 1x1, BN, Swish
self.conv = nn.Conv2d(
in_w, out_w,
kernel_size=1, stride=1, padding=0, bias=False
)
self.conv_bn = nn.BatchNorm2d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.conv_swish = act_fun()
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# Dropout
if cfg.EFFICIENT_NET.DROPOUT_RATIO > 0.0:
self.dropout = nn.Dropout(p=cfg.EFFICIENT_NET.DROPOUT_RATIO)
# FC
self.fc = nn.Linear(out_w, num_classes, bias=True)
def forward(self, x):
# 1x1, BN, Swish
x = self.conv_swish(self.conv_bn(self.conv(x)))
# AvgPool
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
# Dropout
x = self.dropout(x) if hasattr(self, 'dropout') else x
# FC
x = self.fc(x)
return x
class LinearHead(nn.Module):
"""EfficientNet linear head."""
def __init__(self, in_w, out_w, num_classes, act_fun):
super(LinearHead, self).__init__()
self._construct_class(in_w, out_w, num_classes, act_fun)
def _construct_class(self, in_w, out_w, num_classes, act_fun):
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# FC0
self.fc0 = nn.Linear(in_w, out_w, bias=False)
self.fc0_bn = nn.BatchNorm1d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.fc0_swish = act_fun()
# FC
self.fc = nn.Linear(out_w, num_classes, bias=True)
def forward(self, x):
# AvgPool
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
# Linear, BN, Swish
x = self.fc0_swish(self.fc0_bn(self.fc0(x)))
# FC
x = self.fc(x)
return x
class MBConv(nn.Module):
"""Mobile inverted bottleneck block with SE (MBConv)."""
def __init__(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun, seed=None, exp_w=None):
super(MBConv, self).__init__()
self._construct_class(in_w, exp_r, kernel, stride, se_r, out_w, act_fun)
def _construct_class(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun):
# Expansion: 1x1, BN, Swish
self.expand = None
exp_w = int(in_w * exp_r)
# Include exp ops only if the exp ratio is different from 1
if exp_w != in_w:
self.expand = nn.Conv2d(
in_w, exp_w,
kernel_size=1, stride=1, padding=0, bias=False
)
self.expand_bn = nn.BatchNorm2d(
exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.expand_swish = act_fun()
# Depthwise: 3x3 dwise, BN, Swish
self.dwise = nn.Conv2d(
exp_w, exp_w,
kernel_size=kernel, stride=stride, groups=exp_w, bias=False,
# Hacky padding to preserve res (supports only 3x3 and 5x5)
padding=(1 if kernel == 3 else 2)
)
self.dwise_bn = nn.BatchNorm2d(
exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.dwise_swish = act_fun()
# SE: x * F_ex(x)
if cfg.EFFICIENT_NET.SE_ENABLED:
se_w = int(in_w * se_r)
self.se = SE(exp_w, se_w, act_fun)
# Linear projection: 1x1, BN
self.lin_proj = nn.Conv2d(
exp_w, out_w,
kernel_size=1, stride=1, padding=0, bias=False
)
self.lin_proj_bn = nn.BatchNorm2d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
# Nonlinear projection
if not cfg.EFFICIENT_NET.LIN_PROJ:
self.lin_proj_swish = act_fun()
# Skip connections on blocks w/ same in and out shapes (MN-V2, Fig. 4)
self.has_skip = (stride == 1) and (in_w == out_w)
def forward(self, x):
f_x = x
# Expansion
if self.expand:
f_x = self.expand_swish(self.expand_bn(self.expand(f_x)))
# Depthwise
f_x = self.dwise_swish(self.dwise_bn(self.dwise(f_x)))
# SE
if cfg.EFFICIENT_NET.SE_ENABLED:
f_x = self.se(f_x)
# Linear projection
f_x = self.lin_proj_bn(self.lin_proj(f_x))
# Nonlinear projection
if not cfg.EFFICIENT_NET.LIN_PROJ:
f_x = self.lin_proj_swish(f_x)
# Skip connection
if self.has_skip:
# Drop connect
if self.training and cfg.EFFICIENT_NET.DC_RATIO > 0.0:
if cfg.EFFICIENT_NET.DC_IMP == 'tf':
f_x = drop_connect_tf(f_x, cfg.EFFICIENT_NET.DC_RATIO)
else:
f_x = drop_connect_pt(f_x, cfg.EFFICIENT_NET.DC_RATIO)
f_x = x + f_x
return f_x
class MBTalkConv(nn.Module):
"""Mobile inverted bottleneck block with SE (MBConv)."""
def __init__(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun, seed=None, exp_w=None):
super(MBTalkConv, self).__init__()
self.seed=seed
self._construct_class(in_w, exp_r, kernel, stride, se_r, out_w, act_fun, exp_w)
def _construct_class(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun, exp_w):
# Expansion: 1x1, BN, Swish
self.expand = None
if int(exp_r)==1:
exp_w = in_w
else:
self.expand = TalkConv2d(
in_w, exp_w, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.expand_bn = nn.BatchNorm2d(
exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.expand_swish = act_fun()
# Depthwise: 3x3 dwise, BN, Swish
self.dwise = nn.Conv2d(
exp_w, exp_w,
kernel_size=kernel, stride=stride, groups=exp_w, bias=False,
# Hacky padding to preserve res (supports only 3x3 and 5x5)
padding=(1 if kernel == 3 else 2)
)
self.dwise_bn = nn.BatchNorm2d(
exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.dwise_swish = act_fun()
# SE: x * F_ex(x)
if cfg.EFFICIENT_NET.SE_ENABLED:
se_w = int(in_w * se_r)
self.se = SE(exp_w, se_w, act_fun)
# Linear projection: 1x1, BN
self.lin_proj = TalkConv2d(
exp_w, out_w, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.lin_proj_bn = nn.BatchNorm2d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
# Nonlinear projection
if not cfg.EFFICIENT_NET.LIN_PROJ:
self.lin_proj_swish = act_fun()
# Skip connections on blocks w/ same in and out shapes (MN-V2, Fig. 4)
self.has_skip = (stride == 1) and (in_w == out_w)
def forward(self, x):
f_x = x
# Expansion
if self.expand:
f_x = self.expand_swish(self.expand_bn(self.expand(f_x)))
# Depthwise
f_x = self.dwise_swish(self.dwise_bn(self.dwise(f_x)))
# SE
if cfg.EFFICIENT_NET.SE_ENABLED:
f_x = self.se(f_x)
# Linear projection
f_x = self.lin_proj_bn(self.lin_proj(f_x))
# Nonlinear projection
if not cfg.EFFICIENT_NET.LIN_PROJ:
f_x = self.lin_proj_swish(f_x)
# Skip connection
if self.has_skip:
# Drop connect
if self.training and cfg.EFFICIENT_NET.DC_RATIO > 0.0:
if cfg.EFFICIENT_NET.DC_IMP == 'tf':
f_x = drop_connect_tf(f_x, cfg.EFFICIENT_NET.DC_RATIO)
else:
f_x = drop_connect_pt(f_x, cfg.EFFICIENT_NET.DC_RATIO)
f_x = x + f_x
return f_x
class Stage(nn.Module):
"""EfficientNet stage."""
def __init__(self, in_w, exp_r, kernel, stride, se_r, out_w, d, act_fun, exp_w=None):
super(Stage, self).__init__()
self._construct_class(in_w, exp_r, kernel, stride, se_r, out_w, d, act_fun, exp_w)
def _construct_class(self, in_w, exp_r, kernel, stride, se_r, out_w, d, act_fun, exp_w):
if cfg.RGRAPH.KEEP_GRAPH:
seed = cfg.RGRAPH.SEED_GRAPH
else:
seed = int(cfg.RGRAPH.SEED_GRAPH*100)
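        # With KEEP_GRAPH every block reuses the same seed (and hence the same
        # relational-graph wiring); otherwise the seed is bumped after each
        # block so every block draws its own graph.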
# Construct a sequence of blocks
for i in range(d):
trans_fun = get_conv(cfg.RESNET.TRANS_FUN)
# Stride and input width apply to the first block of the stage
stride_b = stride if i == 0 else 1
in_w_b = in_w if i == 0 else out_w
# Construct the block
self.add_module(
'b{}'.format(i + 1),
trans_fun(in_w_b, exp_r, kernel, stride_b, se_r, out_w, act_fun, seed=seed, exp_w=exp_w)
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
def forward(self, x):
for block in self.children():
x = block(x)
return x
class StemIN(nn.Module):
"""EfficientNet stem for ImageNet."""
def __init__(self, in_w, out_w, act_fun):
super(StemIN, self).__init__()
self._construct_class(in_w, out_w, act_fun)
def _construct_class(self, in_w, out_w, act_fun):
self.conv = nn.Conv2d(
in_w, out_w,
kernel_size=3, stride=2, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.swish = act_fun()
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class EfficientNet(nn.Module):
"""EfficientNet model."""
def __init__(self):
assert cfg.TRAIN.DATASET in ['imagenet'], \
'Training on {} is not supported'.format(cfg.TRAIN.DATASET)
assert cfg.TEST.DATASET in ['imagenet'], \
'Testing on {} is not supported'.format(cfg.TEST.DATASET)
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
assert cfg.EFFICIENT_NET.HEAD_TYPE in ['conv_head', 'simple_head', 'linear_head'], \
'Unsupported head type: {}'.format(cfg.EFFICIENT_NET.HEAD_TYPE)
super(EfficientNet, self).__init__()
self._construct_class(
stem_w=cfg.EFFICIENT_NET.STEM_W,
ds=cfg.EFFICIENT_NET.DEPTHS,
ws=cfg.EFFICIENT_NET.WIDTHS,
exp_rs=cfg.EFFICIENT_NET.EXP_RATIOS,
se_r=cfg.EFFICIENT_NET.SE_RATIO,
ss=cfg.EFFICIENT_NET.STRIDES,
ks=cfg.EFFICIENT_NET.KERNELS,
head_type=cfg.EFFICIENT_NET.HEAD_TYPE,
head_w=cfg.EFFICIENT_NET.HEAD_W,
act_type=cfg.EFFICIENT_NET.ACT_FUN,
nc=cfg.MODEL.NUM_CLASSES
)
self.apply(nu.init_weights)
def _construct_class(
self, stem_w, ds, ws, exp_rs, se_r, ss, ks,
head_type, head_w, act_type, nc
):
"""Constructs imagenet models."""
# Group params by stage
stage_params = list(zip(ds, ws, exp_rs, ss, ks))
# Activation function
act_fun = get_act_fun(act_type)
# Set dim for each stage
dim_list = cfg.RGRAPH.DIM_LIST
expdim_list = [int(cfg.EFFICIENT_NET.WIDTHS[i]*cfg.EFFICIENT_NET.EXP_RATIOS[i])
for i in range(len(cfg.EFFICIENT_NET.WIDTHS))]
# Construct the stems
self.stem = StemIN(3, stem_w, act_fun)
prev_w = stem_w
# Construct the stages
for i, (d, w, exp_r, stride, kernel) in enumerate(stage_params):
if cfg.RESNET.TRANS_FUN != 'mbconv_transform':
w = dim_list[i]
exp_w = expdim_list[i]
self.add_module(
's{}'.format(i + 1),
Stage(prev_w, exp_r, kernel, stride, se_r, w, d, act_fun, exp_w=exp_w)
)
prev_w = w
# Construct the head
if head_type == 'conv_head':
self.head = ConvHead(prev_w, head_w, nc, act_fun)
elif head_type == 'linear_head':
self.head = LinearHead(prev_w, head_w, nc, act_fun)
else:
self.head = SimpleHead(prev_w, nc)
def forward(self, x):
for module in self.children():
x = module(x)
return x | 15,385 | 33.809955 | 108 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/resnet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""ResNet or ResNeXt model."""
import torch.nn as nn
import torch
from pycls.config import cfg
import pycls.utils.logging as lu
import pycls.utils.net as nu
from .relation_graph import *
import time
import pdb
logger = lu.get_logger(__name__)
# Stage depths for an ImageNet model {model depth -> (d2, d3, d4, d5)}
_IN_MODEL_STAGE_DS = {
18: (2, 2, 2, 2),
34: (3, 4, 6, 3),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
}
def get_trans_fun(name):
"""Retrieves the transformation function by name."""
trans_funs = {
############ Res-34
'channelbasic_transform': ChannelBasicTransform,
'groupbasictalk_transform': GroupBasicTalkTransform,
############ Res-34-sep
'channelsep_transform': ChannelSepTransform,
'groupseptalk_transform': GroupSepTalkTransform,
############ Res-50
'bottleneck_transform': BottleneckTransform,
'talkbottleneck_transform': TalkBottleneckTransform,
}
assert name in trans_funs.keys(), \
'Transformation function \'{}\' not supported'.format(name)
return trans_funs[name]
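# e.g. get_trans_fun('groupbasictalk_transform') returns the class itself
# (not an instance); callers instantiate it once per block.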
############ Res-34
class ChannelBasicTransform(nn.Module):
"""Basic transformation: 3x3, 3x3"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(ChannelBasicTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=stride, padding=1, bias=False
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 3x3, BN
self.b = nn.Conv2d(
dim_out, dim_out, kernel_size=3,
stride=1, padding=1, bias=False
)
self.b_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.b_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class GroupBasicTalkTransform(nn.Module):
"""Basic transformation: 3x3, 3x3, relational graph"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
self.seed = seed
super(GroupBasicTalkTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=stride, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 3x3, BN
self.b = TalkConv2d(
dim_out, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=1, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.b_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.b_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
############ Res-34-sep
class ChannelSepTransform(nn.Module):
"""Separable transformation: 3x3, 3x3"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(ChannelSepTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# ReLU, 3x3, BN, 1x1, BN
self.a_3x3 = nn.Conv2d(
dim_in, dim_in, kernel_size=3,
stride=stride, padding=1, bias=False, groups=dim_in
)
self.a_1x1 = nn.Conv2d(
dim_in, dim_out, kernel_size=1,
stride=1, padding=0, bias=False
)
self.a_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# ReLU, 3x3, BN, 1x1, BN
self.b_3x3 = nn.Conv2d(
dim_out, dim_out, kernel_size=3,
stride=1, padding=1, bias=False, groups=dim_out
)
self.b_1x1 = nn.Conv2d(
dim_out, dim_out, kernel_size=1,
stride=1, padding=0, bias=False
)
self.b_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.b_1x1_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class GroupSepTalkTransform(nn.Module):
"""Separable transformation: 3x3, 3x3, relational graph"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
self.seed = seed
super(GroupSepTalkTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# ReLU, 3x3, BN, 1x1, BN
self.a_3x3 = nn.Conv2d(
dim_in, dim_in, kernel_size=3,
stride=stride, padding=1, bias=False, groups=dim_in
)
self.a_1x1 = TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.a_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# ReLU, 3x3, BN, 1x1, BN
self.b_3x3 = nn.Conv2d(
dim_out, dim_out, kernel_size=3,
stride=1, padding=1, bias=False, groups=dim_out
)
self.b_1x1 = TalkConv2d(
dim_out, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.b_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.b_1x1_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
############ Res-50
class BottleneckTransform(nn.Module):
"""Bottleneck transformation: 1x1, 3x3, 1x1"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(BottleneckTransform, self).__init__()
dim_inner = int(round(dim_out / 4))
self._construct_class(dim_in, dim_out, stride, dim_inner, num_gs, seed)
def _construct_class(self, dim_in, dim_out, stride, dim_inner, num_gs, seed):
# MSRA -> stride=2 is on 1x1; TH/C2 -> stride=2 is on 3x3
# (str1x1, str3x3) = (stride, 1) if cfg.RESNET.STRIDE_1X1 else (1, stride)
(str1x1, str3x3) = (1, stride)
# 1x1, BN, ReLU
self.a = nn.Conv2d(
dim_in, dim_inner, kernel_size=1,
stride=str1x1, padding=0, bias=False
)
self.a_bn = nn.BatchNorm2d(
dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 3x3, BN, ReLU
self.b = nn.Conv2d(
dim_inner, dim_inner, kernel_size=3,
stride=str3x3, padding=1, groups=num_gs, bias=False
)
self.b_bn = nn.BatchNorm2d(
dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.b_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 1x1, BN
self.c = nn.Conv2d(
dim_inner, dim_out, kernel_size=1,
stride=1, padding=0, bias=False
)
self.c_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.c_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class TalkBottleneckTransform(nn.Module):
"""Bottleneck transformation: 1x1, 3x3, 1x1, relational graph"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(TalkBottleneckTransform, self).__init__()
dim_inner = int(round(dim_out / 4))
self.seed = seed
self._construct_class(dim_in, dim_out, stride, dim_inner, num_gs, seed)
def _construct_class(self, dim_in, dim_out, stride, dim_inner, num_gs, seed):
# MSRA -> stride=2 is on 1x1; TH/C2 -> stride=2 is on 3x3
# (str1x1, str3x3) = (stride, 1) if cfg.RESNET.STRIDE_1X1 else (1, stride)
(str1x1, str3x3) = (1, stride)
# 1x1, BN, ReLU
self.a = TalkConv2d(
dim_in, dim_inner, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=str1x1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.a_bn = nn.BatchNorm2d(
dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 3x3, BN, ReLU
self.b = TalkConv2d(
dim_inner, dim_inner, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=str3x3, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.b_bn = nn.BatchNorm2d(
dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.b_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 1x1, BN
self.c = TalkConv2d(
dim_inner, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.c_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.c_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
##### Remaining ResNet code
class ResBlock(nn.Module):
"""Residual block: x + F(x)"""
def __init__(
self, dim_in, dim_out, stride, trans_fun, dim_inner=None, num_gs=1, seed=None):
super(ResBlock, self).__init__()
self.seed = seed
self._construct_class(dim_in, dim_out, stride, trans_fun, dim_inner, num_gs, seed)
def _add_skip_proj(self, dim_in, dim_out, stride):
if 'group' in cfg.RESNET.TRANS_FUN and 'share' not in cfg.RESNET.TRANS_FUN:
self.proj = TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=stride, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
else:
self.proj = nn.Conv2d(
dim_in, dim_out, kernel_size=1,
stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
def _construct_class(self, dim_in, dim_out, stride, trans_fun, dim_inner, num_gs, seed):
# Use skip connection with projection if dim or res change
self.proj_block = (dim_in != dim_out) or (stride != 1)
if self.proj_block:
self._add_skip_proj(dim_in, dim_out, stride)
self.f = trans_fun(dim_in, dim_out, stride, dim_inner, num_gs, seed)
self.act = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
def forward(self, x):
if self.proj_block:
x = self.bn(self.proj(x)) + self.f(x)
else:
x = x + self.f(x)
x = self.act(x)
return x
class ResStage(nn.Module):
"""Stage of ResNet."""
def __init__(
self, dim_in, dim_out, stride, num_bs, dim_inner=None, num_gs=1):
super(ResStage, self).__init__()
self._construct_class(dim_in, dim_out, stride, num_bs, dim_inner, num_gs)
def _construct_class(self, dim_in, dim_out, stride, num_bs, dim_inner, num_gs):
if cfg.RGRAPH.KEEP_GRAPH:
seed = cfg.RGRAPH.SEED_GRAPH
else:
seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
for i in range(num_bs):
# Stride and dim_in apply to the first block of the stage
b_stride = stride if i == 0 else 1
b_dim_in = dim_in if i == 0 else dim_out
# Retrieve the transformation function
trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
# Construct the block
res_block = ResBlock(
b_dim_in, dim_out, b_stride, trans_fun, dim_inner, num_gs, seed=seed
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
self.add_module('b{}'.format(i + 1), res_block)
for j in range(cfg.RGRAPH.ADD_1x1):
trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN + '1x1')
# Construct the block
res_block = ResBlock(
dim_out, dim_out, 1, trans_fun, dim_inner, num_gs, seed=seed
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
self.add_module('b{}_{}1x1'.format(i + 1, j + 1), res_block)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class ResStem(nn.Module):
"""Stem of ResNet."""
def __init__(self, dim_in, dim_out):
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(ResStem, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar(dim_in, dim_out)
else:
self._construct_imagenet(dim_in, dim_out)
def _construct_cifar(self, dim_in, dim_out):
# 3x3, BN, ReLU
# self.conv = nn.Conv2d(
# dim_in, dim_out, kernel_size=3,
# stride=1, padding=1, bias=False
# )
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=7,
stride=1, padding=3, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def _construct_imagenet(self, dim_in, dim_out):
# 7x7, BN, ReLU, pool
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=7,
stride=2, padding=3, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResHead(nn.Module):
"""ResNet head."""
def __init__(self, dim_in, num_classes):
super(ResHead, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(dim_in, num_classes, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class ResNet(nn.Module):
"""ResNet model."""
def __init__(self):
assert cfg.TRAIN.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Training ResNet on {} is not supported'.format(cfg.TRAIN.DATASET)
assert cfg.TEST.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Testing ResNet on {} is not supported'.format(cfg.TEST.DATASET)
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(ResNet, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar()
elif cfg.TRAIN.DATASET == 'cifar100':
self._construct_cifar()
else:
self._construct_imagenet()
self.apply(nu.init_weights)
    # ##### basic transform
def _construct_cifar(self):
assert (cfg.MODEL.DEPTH - 2) % 6 == 0, \
'Model depth should be of the format 6n + 2 for cifar'
logger.info('Constructing: ResNet-{}, cifar'.format(cfg.MODEL.DEPTH))
# Each stage has the same number of blocks for cifar
num_blocks = int((cfg.MODEL.DEPTH - 2) / 6)
# length = num of stages (excluding stem and head)
dim_list = cfg.RGRAPH.DIM_LIST
        # Stage 1: (N, 3, 32, 32) -> (N, 64, 32, 32)
# self.s1 = ResStem(dim_in=3, dim_out=16)
self.s1 = ResStem(dim_in=3, dim_out=64)
        # Stage 2: (N, 64, 32, 32) -> (N, dim_list[0], 32, 32)
# self.s2 = ResStage(dim_in=16, dim_out=dim_list[0], stride=1, num_bs=num_blocks)
self.s2 = ResStage(dim_in=64, dim_out=dim_list[0], stride=1, num_bs=num_blocks)
        # Stage 3: (N, dim_list[0], 32, 32) -> (N, dim_list[1], 16, 16)
self.s3 = ResStage(dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=num_blocks)
        # Stage 4: (N, dim_list[1], 16, 16) -> (N, dim_list[2], 8, 8)
self.s4 = ResStage(dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=num_blocks)
        # Head: (N, dim_list[2], 8, 8) -> (N, num_classes)
self.head = ResHead(dim_in=dim_list[2], num_classes=cfg.MODEL.NUM_CLASSES)
    # ImageNet-style construction (also used for tinyimagenet200)
def _construct_imagenet(self):
logger.info('Constructing: ResNet-{}, Imagenet'.format(cfg.MODEL.DEPTH))
# Retrieve the number of blocks per stage (excluding base)
(d2, d3, d4, d5) = _IN_MODEL_STAGE_DS[cfg.MODEL.DEPTH]
# Compute the initial inner block dim
dim_list = cfg.RGRAPH.DIM_LIST
        logger.info('dim_list: {}'.format(dim_list))
# Stage 1: (N, 3, 224, 224) -> (N, 64, 56, 56)
self.s1 = ResStem(dim_in=3, dim_out=64)
# Stage 2: (N, 64, 56, 56) -> (N, 256, 56, 56)
self.s2 = ResStage(
dim_in=64, dim_out=dim_list[0], stride=1, num_bs=d2
)
# Stage 3: (N, 256, 56, 56) -> (N, 512, 28, 28)
self.s3 = ResStage(
dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=d3
)
        # Stage 4: (N, 512, 28, 28) -> (N, 1024, 14, 14)
self.s4 = ResStage(
dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=d4
)
# Stage 5: (N, 1024, 14, 14) -> (N, 2048, 7, 7)
self.s5 = ResStage(
dim_in=dim_list[2], dim_out=dim_list[3], stride=2, num_bs=d5
)
# Head: (N, 2048, 7, 7) -> (N, num_classes)
self.head = ResHead(dim_in=dim_list[3], num_classes=cfg.MODEL.NUM_CLASSES)
def forward(self, x):
for module in self.children():
x = module(x)
return x
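# Minimal usage sketch (illustrative; assumes the global cfg has been loaded with a
# supported dataset, MODEL.DEPTH, RESNET.TRANS_FUN and RGRAPH.DIM_LIST beforehand):
#   model = ResNet()
#   logits = model(torch.randn(8, 3, 32, 32))   # cifar input -> (8, cfg.MODEL.NUM_CLASSES)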
| 20,015 | 37.198473 | 108 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/cnn.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""CNN model."""
import torch.nn as nn
import torch
from pycls.config import cfg
import pycls.utils.logging as lu
import pycls.utils.net as nu
from .relation_graph import *
logger = lu.get_logger(__name__)
def get_trans_fun(name):
"""Retrieves the transformation function by name."""
trans_funs = {
##### (1) Level 1: channel
### (1.1) Basic Conv
'convbasic_transform': ConvBasicTransform,
'symconvbasic_transform': SymConvBasicTransform,
'convtalk_transform': ConvTalkTransform, # relational graph
}
assert name in trans_funs.keys(), \
'Transformation function \'{}\' not supported'.format(name)
return trans_funs[name]
##### (1) Level 1: channel
### (1.1) Basic Conv
class ConvBasicTransform(nn.Module):
"""Basic transformation: 3x3"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(ConvBasicTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=stride, padding=1, bias=False
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# self.a_bn.final_bn = True
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class SymConvBasicTransform(nn.Module):
"""Basic transformation: 3x3 conv, symmetric"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(SymConvBasicTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = SymConv2d(
dim_in, dim_out, kernel_size=3,
stride=stride, padding=1, bias=False
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# self.a_bn.final_bn = True
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ConvTalkTransform(nn.Module):
"""Basic transformation: 3x3 conv, relational graph"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
self.seed = seed
super(ConvTalkTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=stride, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# self.a_bn.final_bn = True
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
##### Remaining CNN code
class CNNStage(nn.Module):
"""Stage of CNN."""
def __init__(
self, dim_in, dim_out, stride, num_bs, dim_inner=None, num_gs=1):
super(CNNStage, self).__init__()
self._construct_class(dim_in, dim_out, stride, num_bs, dim_inner, num_gs)
def _construct_class(self, dim_in, dim_out, stride, num_bs, dim_inner, num_gs):
if cfg.RGRAPH.KEEP_GRAPH:
seed = cfg.RGRAPH.SEED_GRAPH
else:
seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
for i in range(num_bs):
# Stride and dim_in apply to the first block of the stage
b_stride = stride if i == 0 else 1
b_dim_in = dim_in if i == 0 else dim_out
# Retrieve the transformation function
trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
# Construct the block
res_block = trans_fun(
b_dim_in, dim_out, b_stride, dim_inner, num_gs, seed=seed
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
self.add_module('b{}'.format(i + 1), res_block)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class CNNStem(nn.Module):
"""Stem of CNN."""
def __init__(self, dim_in, dim_out):
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(CNNStem, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar(dim_in, dim_out)
elif cfg.TRAIN.DATASET == 'cifar100':
self._construct_cifar(dim_in, dim_out)
else:
self._construct_imagenet(dim_in, dim_out)
def _construct_cifar(self, dim_in, dim_out):
# 3x3, BN, ReLU
if cfg.RGRAPH.STEM_MODE == 'default':
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=1, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS,
momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
elif cfg.RGRAPH.STEM_MODE == 'downsample':
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=1, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS,
momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def _construct_imagenet(self, dim_in, dim_out):
# 3x3, BN, ReLU, pool
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=2, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class CNNHead(nn.Module):
"""CNN head."""
def __init__(self, dim_in, num_classes):
super(CNNHead, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(p=0.15)
self.fc = nn.Linear(dim_in, num_classes, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
x = self.fc(x)
return x
class CNN(nn.Module):
"""CNN model."""
def __init__(self):
assert cfg.TRAIN.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Training CNN on {} is not supported'.format(cfg.TRAIN.DATASET)
assert cfg.TEST.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Testing CNN on {} is not supported'.format(cfg.TEST.DATASET)
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(CNN, self).__init__()
self._construct()
self.apply(nu.init_weights)
    # ##### basic transform
def _construct(self):
# Each stage has the same number of blocks for cifar
dim_list = cfg.RGRAPH.DIM_LIST
num_bs = cfg.MODEL.LAYERS // 3
self.s1 = CNNStem(dim_in=3, dim_out=cfg.RGRAPH.DIM_FIRST)
self.s2 = CNNStage(dim_in=cfg.RGRAPH.DIM_FIRST, dim_out=dim_list[0], stride=2, num_bs=num_bs)
self.s3 = CNNStage(dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=num_bs)
self.s4 = CNNStage(dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=num_bs)
# self.s5 = CNNStage(dim_in=dim_list[2], dim_out=dim_list[3], stride=2, num_bs=num_bs)
self.head = CNNHead(dim_in=dim_list[2], num_classes=cfg.MODEL.NUM_CLASSES)
def forward(self, x):
for module in self.children():
x = module(x)
return x
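# Minimal usage sketch (illustrative; assumes cfg is configured for cifar10/cifar100
# with MODEL.LAYERS divisible by 3 and RGRAPH.DIM_LIST of length >= 3):
#   model = CNN()
#   logits = model(torch.randn(8, 3, 32, 32))   # -> (8, cfg.MODEL.NUM_CLASSES)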
| 17,388 | 34.779835 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/vgg.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""VGG example"""
import torch.nn as nn
import torch.nn.functional as F
from pycls.config import cfg
import pycls.utils.net as nu
from .relation_graph import *
class VGG(nn.Module):
def __init__(self, num_classes=1024):
super(VGG, self).__init__()
self.seed = cfg.RGRAPH.SEED_GRAPH
def conv_bn(dim_in, dim_out, stride, stem=False):
if stem:
conv = get_conv('convbasic_transform', dim_in, dim_out, stride)
else:
conv = get_conv(cfg.RESNET.TRANS_FUN, dim_in, dim_out, stride)
return nn.Sequential(
conv,
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True)
)
def get_conv(name, dim_in, dim_out, stride=1):
if not cfg.RGRAPH.KEEP_GRAPH:
self.seed += 1
if name == 'convbasic_transform':
return nn.Conv2d(dim_in, dim_out,
kernel_size=3, stride=stride,
padding=1, bias=False)
elif name == 'convtalk_transform':
return TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=stride, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE,
directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P,
talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.dim_list = cfg.RGRAPH.DIM_LIST
# print(self.dim_list)
self.model = nn.Sequential(
conv_bn(3, 64, 1, stem=True),
conv_bn(64, self.dim_list[0], 1),
nn.MaxPool2d(kernel_size=2, stride=2),
conv_bn(self.dim_list[0], self.dim_list[1], 1),
conv_bn(self.dim_list[1], self.dim_list[1], 1),
nn.MaxPool2d(kernel_size=2, stride=2),
conv_bn(self.dim_list[1], self.dim_list[2], 1),
conv_bn(self.dim_list[2], self.dim_list[2], 1),
nn.MaxPool2d(kernel_size=2, stride=2),
conv_bn(self.dim_list[2], self.dim_list[3], 1),
conv_bn(self.dim_list[3], self.dim_list[3], 1),
nn.MaxPool2d(kernel_size=2, stride=2),
conv_bn(self.dim_list[3], self.dim_list[3], 1),
conv_bn(self.dim_list[3], self.dim_list[3], 1),
)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(self.dim_list[3], num_classes)
self.apply(nu.init_weights)
def forward(self, x):
x = self.model(x)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
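# Minimal usage sketch (illustrative; assumes cfg.RGRAPH.DIM_LIST has 4 entries and
# cfg.RESNET.TRANS_FUN names one of the conv transforms handled by get_conv above):
#   net = VGG(num_classes=10)
#   logits = net(torch.randn(4, 3, 32, 32))   # adaptive avg-pool keeps the input size flexible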
| 3,097 | 35.880952 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/mlp.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""MLP model."""
import torch.nn as nn
import torch
from pycls.config import cfg
import pycls.utils.logging as lu
import pycls.utils.net as nu
from .relation_graph import *
import time
import pdb
logger = lu.get_logger(__name__)
def get_trans_fun(name):
"""Retrieves the transformation function by name."""
trans_funs = {
##### (1) Level 1: channel
'linear_transform': LinearTransform,
'symlinear_transform': SymLinearTransform,
'grouplinear_transform': GroupLinearTransform,
'groupshufflelinear_transform': GroupShuffleLinearTransform,
'talklinear_transform': TalkLinearTransform, # relational graph
}
assert name in trans_funs.keys(), \
'Transformation function \'{}\' not supported'.format(name)
return trans_funs[name]
##### (0) Basic
class LinearTransform(nn.Module):
"""Basic transformation: linear"""
def __init__(self, dim_in, dim_out, seed=None):
super(LinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
# 3x3, BN, ReLU
self.a = nn.Linear(
dim_in, dim_out, bias=False
)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class SymLinearTransform(nn.Module):
"""Basic transformation: linear, symmetric"""
def __init__(self, dim_in, dim_out, seed=None):
super(SymLinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
# 3x3, BN, ReLU
self.a = SymLinear(
dim_in, dim_out, bias=False
)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class GroupLinearTransform(nn.Module):
"""Basic transformation: linear, group"""
def __init__(self, dim_in, dim_out, seed=None):
super(GroupLinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
# 3x3, BN, ReLU
self.a = GroupLinear(
dim_in, dim_out, bias=False, group_size=cfg.RGRAPH.GROUP_SIZE
)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class GroupShuffleLinearTransform(nn.Module):
"""Basic transformation: linear, shuffle"""
def __init__(self, dim_in, dim_out, seed=None):
super(GroupShuffleLinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
# 3x3, BN, ReLU
self.a = GroupLinear(
dim_in, dim_out, bias=False, group_size=cfg.RGRAPH.GROUP_SIZE
)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
self.shuffle_shape = (dim_out // cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.GROUP_NUM)
def forward(self, x):
x = self.a(x)
x = x.view(x.shape[0], self.shuffle_shape[0], self.shuffle_shape[1]).permute(0, 2, 1).contiguous()
x = x.view(x.shape[0], x.shape[1] * x.shape[2])
x = self.a_bn(x)
x = self.relu(x)
return x
class TalkLinearTransform(nn.Module):
"""Basic transformation: linear, relational graph"""
def __init__(self, dim_in, dim_out, seed=None):
self.seed = seed
super(TalkLinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
self.a = TalkLinear(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, sparsity=cfg.RGRAPH.SPARSITY,
p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class MLPStage(nn.Module):
"""Stage of MLPNet."""
def __init__(
self, dim_in, dim_out, num_bs):
super(MLPStage, self).__init__()
self._construct_class(dim_in, dim_out, num_bs)
def _construct_class(self, dim_in, dim_out, num_bs):
if cfg.RGRAPH.KEEP_GRAPH:
seed = cfg.RGRAPH.SEED_GRAPH
else:
seed = int(dim_out * 100 * cfg.RGRAPH.SPARSITY)
for i in range(num_bs):
b_dim_in = dim_in if i == 0 else dim_out
trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
res_block = trans_fun(
b_dim_in, dim_out, seed=seed
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
self.add_module('b{}'.format(i + 1), res_block)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class MLPStem(nn.Module):
"""Stem of MLPNet."""
def __init__(self, dim_in, dim_out):
super(MLPStem, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar(dim_in, dim_out)
else:
raise NotImplementedError
def _construct_cifar(self, dim_in, dim_out):
self.linear = nn.Linear(
dim_in, dim_out, bias=False
)
self.bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
x = x.view(x.size(0), -1)
for layer in self.children():
x = layer(x)
return x
class MLPHead(nn.Module):
"""MLPNet head."""
def __init__(self, dim_in, num_classes):
super(MLPHead, self).__init__()
self.fc = nn.Linear(dim_in, num_classes, bias=True)
def forward(self, x):
x = self.fc(x)
return x
class MLPNet(nn.Module):
"""MLPNet model."""
def __init__(self):
assert cfg.TRAIN.DATASET in ['cifar10'], \
'Training MLPNet on {} is not supported'.format(cfg.TRAIN.DATASET)
assert cfg.TEST.DATASET in ['cifar10'], \
'Testing MLPNet on {} is not supported'.format(cfg.TEST.DATASET)
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(MLPNet, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar()
else:
raise NotImplementedError
self.apply(nu.init_weights)
# ##### basic transform
def _construct_cifar(self):
num_layers = cfg.MODEL.LAYERS
dim_inner = cfg.RGRAPH.DIM_LIST[0]
dim_first = cfg.RGRAPH.DIM_FIRST
self.s1 = MLPStem(dim_in=3072, dim_out=dim_first)
self.s2 = MLPStage(dim_in=dim_first, dim_out=dim_inner, num_bs=num_layers)
self.head = MLPHead(dim_in=dim_inner, num_classes=cfg.MODEL.NUM_CLASSES)
def forward(self, x):
for module in self.children():
x = module(x)
return x
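# Minimal usage sketch (illustrative; cifar10 only -- the stem flattens each image to 3072):
#   net = MLPNet()
#   logits = net(torch.randn(8, 3, 32, 32))   # -> (8, cfg.MODEL.NUM_CLASSES)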
| 8,012 | 30.300781 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/model_builder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Model construction functions."""
import torch
from pycls.config import cfg
from pycls.models.resnet import ResNet
from pycls.models.mlp import MLPNet
from pycls.models.cnn import CNN
from pycls.models.mobilenet import MobileNetV1
from pycls.models.efficientnet import EfficientNet
from pycls.models.vgg import VGG
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
logger = lu.get_logger(__name__)
# Supported model types
_MODEL_TYPES = {
'resnet': ResNet,
'mlpnet': MLPNet,
'cnn': CNN,
'mobilenet': MobileNetV1,
'efficientnet': EfficientNet,
'vgg': VGG,
}
def build_model():
"""Builds the model."""
assert cfg.MODEL.TYPE in _MODEL_TYPES.keys(), \
'Model type \'{}\' not supported'.format(cfg.MODEL.TYPE)
assert cfg.NUM_GPUS <= torch.cuda.device_count(), \
'Cannot use more GPU devices than available'
# Construct the model
model = _MODEL_TYPES[cfg.MODEL.TYPE]()
# Determine the GPU used by the current process
cur_device = torch.cuda.current_device()
# Transfer the model to the current GPU device
model = model.cuda(device=cur_device)
# Use multi-process data parallel model in the multi-gpu setting
if cfg.NUM_GPUS > 1:
# Make model replica operate on the current device
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
return model
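# Usage sketch (illustrative; assumes cfg is fully loaded and at least one CUDA
# device is visible to the current process):
#   model = build_model()   # e.g. cfg.MODEL.TYPE = 'resnet' -> ResNet on the current GPU,
#                           # wrapped in DistributedDataParallel when cfg.NUM_GPUS > 1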
## auto match flop
def build_model_stats(mode='flops'):
"""Builds the model."""
assert cfg.MODEL.TYPE in _MODEL_TYPES.keys(), \
'Model type \'{}\' not supported'.format(cfg.MODEL.TYPE)
assert cfg.NUM_GPUS <= torch.cuda.device_count(), \
'Cannot use more GPU devices than available'
# Construct the model
model = _MODEL_TYPES[cfg.MODEL.TYPE]()
if mode == 'flops':
flops = mu.flops_count(model)
return flops
else:
params = mu.params_count(model)
return params
| 2,355 | 30 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/mobilenet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""MobileNet example"""
import torch.nn as nn
import torch.nn.functional as F
from pycls.config import cfg
import pycls.utils.net as nu
from .relation_graph import *
class MobileNetV1(nn.Module):
def __init__(self, num_classes=1024):
super(MobileNetV1, self).__init__()
if cfg.RGRAPH.KEEP_GRAPH:
self.seed = cfg.RGRAPH.SEED_GRAPH
else:
self.seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
def conv_bn(dim_in, dim_out, stride):
return nn.Sequential(
nn.Conv2d(dim_in, dim_out, 3, stride, 1, bias=False),
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True)
)
def get_conv(name, dim_in, dim_out):
if not cfg.RGRAPH.KEEP_GRAPH:
self.seed += 1
if name == 'channelbasic_transform':
return nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
elif name == 'groupbasictalk_transform':
return TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE,
directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P,
talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
def conv_dw(dim_in, dim_out, stride):
conv1x1 = get_conv(cfg.RESNET.TRANS_FUN, dim_in, dim_out)
return nn.Sequential(
nn.Conv2d(dim_in, dim_in, 3, stride, 1, groups=dim_in,
bias=False),
nn.BatchNorm2d(dim_in),
nn.ReLU(inplace=True),
conv1x1,
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True),
)
self.dim_list = cfg.RGRAPH.DIM_LIST
# print(self.dim_list)
self.model = nn.Sequential(
conv_bn(3, 32, 2),
conv_dw(32, self.dim_list[1], 1),
conv_dw(self.dim_list[1], self.dim_list[2], 2),
conv_dw(self.dim_list[2], self.dim_list[2], 1),
conv_dw(self.dim_list[2], self.dim_list[3], 2),
conv_dw(self.dim_list[3], self.dim_list[3], 1),
conv_dw(self.dim_list[3], self.dim_list[4], 2),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[5], 2),
conv_dw(self.dim_list[5], self.dim_list[5], 1),
)
self.fc = nn.Linear(self.dim_list[5], num_classes)
self.apply(nu.init_weights)
def forward(self, x):
x = self.model(x)
x = F.avg_pool2d(x, 7)
x = x.view(-1, self.dim_list[5])
x = self.fc(x)
return x
| 3,404 | 35.223404 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/optimizer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Optimizer."""
import torch
from pycls.config import cfg
import pycls.utils.lr_policy as lr_policy
def construct_optimizer(model):
"""Constructs the optimizer.
Note that the momentum update in PyTorch differs from the one in Caffe2.
In particular,
Caffe2:
V := mu * V + lr * g
p := p - V
PyTorch:
V := mu * V + g
p := p - lr * V
where V is the velocity, mu is the momentum factor, lr is the learning rate,
g is the gradient and p are the parameters.
Since V is defined independently of the learning rate in PyTorch,
when the learning rate is changed there is no need to perform the
momentum correction by scaling V (unlike in the Caffe2 case).
"""
return torch.optim.SGD(
model.parameters(),
lr=cfg.OPTIM.BASE_LR,
momentum=cfg.OPTIM.MOMENTUM,
weight_decay=cfg.OPTIM.WEIGHT_DECAY,
dampening=cfg.OPTIM.DAMPENING,
nesterov=cfg.OPTIM.NESTEROV
)
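# Worked example of the note above (mu=0.9, g=1.0 at every step, V0=0):
#   step 1, lr=0.1:  Caffe2 V=0.1, p -= 0.1              | PyTorch V=1.0, p -= 0.1*1.0 = 0.1
#   step 2, lr=0.01: Caffe2 V=0.9*0.1+0.01*1.0 = 0.1,    | PyTorch V=0.9*1.0+1.0 = 1.9,
#                    p -= 0.1                            |         p -= 0.01*1.9 = 0.019
# The two match while lr is constant; once lr changes, Caffe2 must rescale V
# (momentum correction) to reproduce the PyTorch behavior, while PyTorch needs nothing.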
def get_epoch_lr(cur_epoch):
"""Retrieves the lr for the given epoch (as specified by the lr policy)."""
return lr_policy.get_epoch_lr(cur_epoch)
def set_lr(optimizer, new_lr):
"""Sets the optimizer lr to the specified value."""
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
| 1,678 | 27.457627 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/relation_graph.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Relational graph modules"""
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
import networkx as nx
import numpy as np
from torch.nn.modules.utils import _pair
from torch.nn.modules.conv import _ConvNd
from torch.autograd import Function
from itertools import repeat
from networkx.utils import py_random_state
from pycls.datasets.load_graph import load_graph
import pdb
import time
import random
def compute_count(channel, group):
divide = channel // group
remain = channel % group
out = np.zeros(group, dtype=int)
out[:remain] = divide + 1
out[remain:] = divide
return out
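# Example: compute_count(10, 3) -> array([4, 3, 3]); the first (channel % group)
# groups absorb the remainder, so the counts always sum back to `channel`.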
@py_random_state(3)
def ws_graph(n, k, p, seed=1):
"""Returns a ws-flex graph, k can be real number in [2,n]
"""
assert k >= 2 and k <= n
# compute number of edges:
edge_num = int(round(k * n / 2))
count = compute_count(edge_num, n)
# print(count)
G = nx.Graph()
for i in range(n):
source = [i] * count[i]
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
# print(source, target)
G.add_edges_from(zip(source, target))
# rewire edges from each node
nodes = list(G.nodes())
for i in range(n):
u = i
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
for v in target:
if seed.random() < p:
w = seed.choice(nodes)
# Enforce no self-loops or multiple edges
while w == u or G.has_edge(u, w):
w = seed.choice(nodes)
if G.degree(u) >= n - 1:
break # skip this rewiring
else:
G.remove_edge(u, v)
G.add_edge(u, w)
return G
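# Example (illustrative): ws_graph(n=8, k=2.5, p=0.1) targets round(2.5*8/2) = 10 edges;
# compute_count(10, 8) = [2, 2, 1, 1, 1, 1, 1, 1] ring edges per node, each of which is
# then rewired to a random node with probability p.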
@py_random_state(4)
def connected_ws_graph(n, k, p, tries=100, seed=1):
"""Returns a connected ws-flex graph.
"""
for i in range(tries):
# seed is an RNG so should change sequence each call
G = ws_graph(n, k, p, seed)
if nx.is_connected(G):
return G
raise nx.NetworkXError('Maximum number of tries exceeded')
def nx_to_edge(graph, directed=False, add_self_loops=True,
shuffle_id=False, seed=1):
    '''Converts an nx graph to an edge-index ndarray (optionally bidirectional, with self-loops).'''
    graph.remove_edges_from(nx.selfloop_edges(graph))  # module-level API; works on networkx >= 2.0
# relabel graphs
keys = list(graph.nodes)
vals = list(range(graph.number_of_nodes()))
# shuffle node id assignment
if shuffle_id:
random.seed(seed)
random.shuffle(vals)
mapping = dict(zip(keys, vals))
graph = nx.relabel_nodes(graph, mapping, copy=True)
# get edges
edge_index = np.array(list(graph.edges))
if not directed:
edge_index = np.concatenate((edge_index, edge_index[:, ::-1]), axis=0)
if add_self_loops:
edge_self = np.arange(graph.number_of_nodes())[:, np.newaxis]
edge_self = np.tile(edge_self, (1, 2))
edge_index = np.concatenate((edge_index, edge_self), axis=0)
# sort edges
idx = np.argsort(edge_index[:, 0])
edge_index = edge_index[idx, :]
return edge_index
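# Example: for an undirected triangle on nodes {0, 1, 2}, nx_to_edge(G) emits both
# directions of each of the 3 edges plus 3 self-loops -> a (9, 2) array sorted by source.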
# edge index generator
def generate_index(message_type='ba', n=16, sparsity=0.5, p=0.2,
directed=False, seed=123):
degree = n * sparsity
known_names = ['mcwhole', 'mcwholeraw', 'mcvisual', 'mcvisualraw', 'cat', 'catraw']
if message_type == 'er':
graph = nx.gnm_random_graph(n=n, m=n * degree // 2, seed=seed)
elif message_type == 'random':
edge_num = int(n * n * sparsity)
edge_id = np.random.choice(n * n, edge_num, replace=False)
edge_index = np.zeros((edge_num, 2), dtype=int)
for i in range(edge_num):
edge_index[i, 0] = edge_id[i] // n
edge_index[i, 1] = edge_id[i] % n
elif message_type == 'ws':
graph = connected_ws_graph(n=n, k=degree, p=p, seed=seed)
elif message_type == 'ba':
graph = nx.barabasi_albert_graph(n=n, m=degree // 2, seed=seed)
elif message_type == 'hypercube':
graph = nx.hypercube_graph(n=int(np.log2(n)))
elif message_type == 'grid':
m = degree
n = n // degree
graph = nx.grid_2d_graph(m=m, n=n)
elif message_type == 'cycle':
graph = nx.cycle_graph(n=n)
elif message_type == 'tree':
graph = nx.random_tree(n=n, seed=seed)
elif message_type == 'regular':
graph = nx.connected_watts_strogatz_graph(n=n, k=degree, p=0, seed=seed)
elif message_type in known_names:
graph = load_graph(message_type)
edge_index = nx_to_edge(graph, directed=True, seed=seed)
else:
raise NotImplementedError
if message_type != 'random' and message_type not in known_names:
edge_index = nx_to_edge(graph, directed=directed, seed=seed)
return edge_index
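# Example (illustrative): generate_index('ws', n=16, sparsity=0.5, p=0.2, seed=1)
# builds a connected ws-flex graph with average degree 16*0.5 = 8 and returns its
# bidirectional edge index with self-loops appended.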
def compute_size(channel, group, seed=1):
np.random.seed(seed)
divide = channel // group
remain = channel % group
out = np.zeros(group, dtype=int)
out[:remain] = divide + 1
out[remain:] = divide
out = np.random.permutation(out)
return out
def compute_densemask(in_channels, out_channels, group_num, edge_index):
repeat_in = compute_size(in_channels, group_num)
repeat_out = compute_size(out_channels, group_num)
mask = np.zeros((group_num, group_num))
mask[edge_index[:, 0], edge_index[:, 1]] = 1
mask = np.repeat(mask, repeat_out, axis=0)
mask = np.repeat(mask, repeat_in, axis=1)
return mask
def get_mask(in_channels, out_channels, group_num,
message_type='ba', directed=False, sparsity=0.5, p=0.2, talk_mode='dense', seed=123):
assert group_num <= in_channels and group_num <= out_channels
# high-level graph edge index
edge_index_high = generate_index(message_type=message_type,
n=group_num, sparsity=sparsity, p=p, directed=directed, seed=seed)
# get in/out size for each high-level node
in_sizes = compute_size(in_channels, group_num)
out_sizes = compute_size(out_channels, group_num)
# decide low-level node num
group_num_low = int(min(np.min(in_sizes), np.min(out_sizes)))
# decide how to fill each node
mask_high = compute_densemask(in_channels, out_channels, group_num, edge_index_high)
return mask_high
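# Example: get_mask(in_channels=4, out_channels=6, group_num=2, ...) expands the
# 2x2 group adjacency (with self-loops) into a dense (6, 4) 0/1 mask by repeating
# each group row/column to its assigned channel count.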
############## Linear model
class TalkLinear(nn.Linear):
'''Relational graph version of Linear. Neurons "talk" according to the graph structure'''
def __init__(self, in_channels, out_channels, group_num, bias=False,
message_type='ba', directed=False,
sparsity=0.5, p=0.2, talk_mode='dense', seed=None):
group_num_max = min(in_channels, out_channels)
if group_num > group_num_max:
group_num = group_num_max
# print(group_num, in_channels, out_channels, kernel_size, stride)
super(TalkLinear, self).__init__(
in_channels, out_channels, bias)
self.mask = get_mask(in_channels, out_channels, group_num,
message_type, directed, sparsity, p, talk_mode, seed)
nonzero = np.sum(self.mask)
self.mask = torch.from_numpy(self.mask).float().cuda()
self.flops_scale = nonzero / (in_channels * out_channels)
self.params_scale = self.flops_scale
self.init_scale = torch.sqrt(out_channels / torch.sum(self.mask.cpu(), dim=0, keepdim=True))
def forward(self, x):
weight = self.weight * self.mask
# pdb.set_trace()
return F.linear(x, weight, self.bias)
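# Usage sketch (illustrative; the mask is created on GPU, so a CUDA device is assumed):
#   fc = TalkLinear(64, 64, group_num=8, message_type='ws', sparsity=0.5, p=0.2, seed=1).cuda()
#   y = fc(torch.randn(16, 64).cuda())   # (16, 64); absent graph edges contribute nothing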
class SymLinear(nn.Module):
'''Linear with symmetric weight matrices'''
def __init__(self, in_features, out_features, bias=True):
super(SymLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
weight = self.weight + self.weight.permute(1, 0)
return F.linear(input, weight, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
############## Conv model
class TalkConv2d(_ConvNd):
'''Relational graph version of Conv2d. Neurons "talk" according to the graph structure'''
def __init__(self, in_channels, out_channels, group_num, kernel_size, stride=1,
padding=0, dilation=1, bias=False, message_type='ba', directed=False, agg='sum',
sparsity=0.5, p=0.2, talk_mode='dense', seed=None):
group_num_max = min(in_channels, out_channels)
if group_num > group_num_max:
group_num = group_num_max
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(TalkConv2d, self).__init__(
in_channels, out_channels,
kernel_size, stride, padding, dilation,
False, _pair(0), 1, bias, 'zeros')
self.mask = get_mask(in_channels, out_channels, group_num,
message_type, directed, sparsity, p, talk_mode, seed)
nonzero = np.sum(self.mask)
self.mask = torch.from_numpy(self.mask[:, :, np.newaxis, np.newaxis]).float().cuda()
self.init_scale = torch.sqrt(out_channels / torch.sum(self.mask.cpu(), dim=0, keepdim=True))
self.flops_scale = nonzero / (in_channels * out_channels)
self.params_scale = self.flops_scale
def forward(self, input):
weight = self.weight * self.mask
return F.conv2d(input, weight, self.bias, self.stride, self.padding, self.dilation, 1)
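# Usage sketch (illustrative; CUDA assumed as for TalkLinear):
#   conv = TalkConv2d(64, 64, 8, kernel_size=3, padding=1,
#                     message_type='ws', sparsity=0.5, p=0.2, seed=1).cuda()
#   y = conv(torch.randn(8, 64, 32, 32).cuda())   # (8, 64, 32, 32), channel-masked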
class SymConv2d(_ConvNd):
'''Conv2d with symmetric weight matrices'''
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=True, padding_mode='zeros'):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(SymConv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
def forward(self, input):
weight = self.weight + self.weight.permute(1, 0, 2, 3)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
return F.conv2d(F.pad(input, expanded_padding, mode='circular'),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
########### Other OPs
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)"""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish activation fun."""
def __init__(self, in_w, se_w, act_fun):
super(SE, self).__init__()
self._construct_class(in_w, se_w, act_fun)
def _construct_class(self, in_w, se_w, act_fun):
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# FC, Swish, FC, Sigmoid
self.f_ex = nn.Sequential(
nn.Conv2d(in_w, se_w, kernel_size=1, bias=True),
act_fun(),
nn.Conv2d(se_w, in_w, kernel_size=1, bias=True),
nn.Sigmoid()
)
def forward(self, x):
return x * self.f_ex(self.avg_pool(x))
class SparseLinear(nn.Linear):
'''Sparse Linear layer'''
def __init__(self, group_num, in_scale, out_scale, bias=False,
edge_index=None, flops_scale=0.5, params_scale=0.5):
# mask is used for reset to zero
mask_one = np.ones((out_scale, in_scale), dtype=bool)
mask_zero = np.zeros((out_scale, in_scale), dtype=bool)
mask_list = [[mask_one for i in range(group_num)] for j in range(group_num)]
for i in range(edge_index.shape[0]):
mask_list[edge_index[i, 0]][edge_index[i, 1]] = mask_zero
self.mask = np.block(mask_list)
self.edge_index = edge_index
# todo: update to pytorch 1.2.0, then use bool() dtype
self.mask = torch.from_numpy(self.mask).byte().cuda()
self.flops_scale = flops_scale
self.params_scale = params_scale
super(SparseLinear, self).__init__(
group_num * in_scale, group_num * out_scale, bias)
def forward(self, x):
weight = self.weight.clone().masked_fill_(self.mask, 0)
# pdb.set_trace()
return F.linear(x, weight, self.bias)
class GroupLinear(nn.Module):
'''Group conv style linear layer'''
def __init__(self, in_channels, out_channels, bias=False, group_size=1):
super(GroupLinear, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.group_size = group_size
self.group_num = in_channels // group_size
self.in_scale = in_channels // self.group_num
self.out_scale = out_channels // self.group_num
assert in_channels % self.group_num == 0
assert out_channels % self.group_num == 0
assert in_channels % self.group_size == 0
# Note: agg_fun is always sum
self.edge_index = np.arange(self.group_num)[:, np.newaxis].repeat(2, axis=1)
self.edge_num = self.edge_index.shape[0]
flops_scale = self.edge_num / (self.group_num * self.group_num)
params_scale = self.edge_num / (self.group_num * self.group_num)
self.linear = SparseLinear(self.group_num, self.in_scale, self.out_scale, bias,
edge_index=self.edge_index, flops_scale=flops_scale, params_scale=params_scale)
def forward(self, x):
x = self.linear(x)
return x
| 15,045 | 35.877451 | 114 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/cifar100.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""CIFAR100 dataset."""
import numpy as np
import os
import pickle
import torch
import torch.utils.data
import pycls.datasets.transforms as transforms
from torchvision import datasets
import pycls.utils.logging as lu
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [129.3, 124.1, 112.4]
_SD = [68.2, 65.4, 70.4]
class Cifar100(torch.utils.data.Dataset):
"""CIFAR-100 dataset."""
def __init__(self, data_path, split, batch_size):
assert os.path.exists(data_path), \
'Data path \'{}\' not found'.format(data_path)
assert split in ['train', 'test'], \
'Split \'{}\' not supported for cifar'.format(split)
logger.info('Constructing CIFAR-100 {}...'.format(split))
self._data_path = data_path
self._split = split
self._batch_size = batch_size
# Data format:
# self._inputs - (split_size, 3, 32, 32) ndarray
# self._labels - split_size list
self._inputs, self._labels = self._load_data()
def _load_batch(self, batch_path):
with open(batch_path, 'rb') as f:
d = pickle.load(f, encoding='bytes')
return d[b'data'], d[b'fine_labels']
# return d[b'data'], d[b'labels']
def _load_data(self):
"""Loads data in memory."""
logger.info('{} data path: {}'.format(self._split, self._data_path))
# Compute data batch names
if self._split == 'train':
batch_names = ['train']
# datasets.CIFAR100(self._data_path, train=True)
# batch_names = ['data_batch_{}'.format(i) for i in range(1, 6)]
else:
batch_names = ['test']
# Load data batches
inputs, labels = [], []
for batch_name in batch_names:
batch_path = os.path.join(self._data_path, batch_name)
inputs_batch, labels_batch = self._load_batch(batch_path)
inputs.append(inputs_batch)
labels += labels_batch
# Combine and reshape the inputs
inputs = np.vstack(inputs).astype(np.float32)
inputs = inputs.reshape((-1, 3, 32, 32))
return inputs, labels
def _transform_image(self, image):
"""Transforms the image for network input."""
if self._batch_size != 1:
image = transforms.color_normalization(image, _MEAN, _SD)
if self._split == 'train':
image = transforms.horizontal_flip(image=image, prob=0.5)
image = transforms.random_crop(image=image, size=32, pad_size=4)
return image
def __getitem__(self, index):
image, label = self._inputs[index, ...], self._labels[index]
image = self._transform_image(image)
return image, label
def __len__(self):
return self._inputs.shape[0]
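# A minimal usage sketch; the data path below is a placeholder and must
# contain the pickled CIFAR-100 'train'/'test' batches.
def _demo_cifar100(data_path='/path/to/cifar100'):
    """Builds the dataset and reads one transformed sample."""
    ds = Cifar100(data_path, split='train', batch_size=128)
    image, label = ds[0]  # image: (3, 32, 32) float32, augmented + normalized
    return image.shape, label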
| 3,163 | 34.155556 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/cifar10.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""CIFAR10 dataset."""
import numpy as np
import os
import pickle
import torch
import torch.utils.data
import pycls.datasets.transforms as transforms
import pycls.utils.logging as lu
from pycls.config import cfg
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [125.3, 123.0, 113.9]
_SD = [63.0, 62.1, 66.7]
class Cifar10(torch.utils.data.Dataset):
"""CIFAR-10 dataset."""
def __init__(self, data_path, split, batch_size):
assert os.path.exists(data_path), \
'Data path \'{}\' not found'.format(data_path)
assert split in ['train', 'test'], \
'Split \'{}\' not supported for cifar'.format(split)
logger.info('Constructing CIFAR-10 {}...'.format(split))
self._data_path = data_path
self._split = split
self._batch_size = batch_size
# Data format:
# self._inputs - (split_size, 3, 32, 32) ndarray
# self._labels - split_size list
self._inputs, self._labels = self._load_data()
def _load_batch(self, batch_path):
with open(batch_path, 'rb') as f:
d = pickle.load(f, encoding='bytes')
return d[b'data'], d[b'labels']
def _load_data(self):
"""Loads data in memory."""
logger.info('{} data path: {}'.format(self._split, self._data_path))
# Compute data batch names
if self._split == 'train':
batch_names = ['data_batch_{}'.format(i) for i in range(1, 6)]
else:
batch_names = ['test_batch']
# Load data batches
inputs, labels = [], []
for batch_name in batch_names:
batch_path = os.path.join(self._data_path, batch_name)
inputs_batch, labels_batch = self._load_batch(batch_path)
inputs.append(inputs_batch)
labels += labels_batch
# Combine and reshape the inputs
inputs = np.vstack(inputs).astype(np.float32)
inputs = inputs.reshape((-1, 3, 32, 32))
return inputs, labels
def _transform_image(self, image):
"""Transforms the image for network input."""
if self._batch_size != 1:
# Normalizing input images
image = transforms.color_normalization(image, _MEAN, _SD)
if self._split == 'train':
image = transforms.horizontal_flip(image=image, prob=0.5)
image = transforms.random_crop(image=image, size=32, pad_size=4)
return image
def __getitem__(self, index):
image, label = self._inputs[index, ...], self._labels[index]
image = self._transform_image(image)
return image, label
def __len__(self):
return self._inputs.shape[0]
| 3,048 | 33.647727 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/paths.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Dataset paths."""
import os
# Default data directory (/path/pycls/pycls/datasets/data)
_DEF_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
# Data paths
_paths = {
'cifar10': _DEF_DATA_DIR + '/cifar10',
'cifar100': _DEF_DATA_DIR + '/cifar100',
'tinyimagenet200': _DEF_DATA_DIR + '/tinyimagenet200',
'imagenet': _DEF_DATA_DIR + '/imagenet'
}
def has_data_path(dataset_name):
"""Determines if the dataset has a data path."""
return dataset_name in _paths.keys()
def get_data_path(dataset_name):
"""Retrieves data path for the dataset."""
return _paths[dataset_name]
def set_data_path(dataset_name, data_path):
"""Sets data path for the dataset."""
_paths[dataset_name] = data_path
| 1,084 | 26.820513 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/loader.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Data loader."""
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
import torch
from pycls.config import cfg
from pycls.datasets.cifar10 import Cifar10
from pycls.datasets.cifar100 import Cifar100
from pycls.datasets.tinyimagenet200 import TinyImageNet200
from pycls.datasets.imagenet import ImageNet
import pycls.datasets.paths as dp
# Supported datasets
_DATASET_CATALOG = {
'cifar10': Cifar10,
'cifar100': Cifar100,
'tinyimagenet200': TinyImageNet200,
'imagenet': ImageNet
}
def _construct_loader(dataset_name, split, batch_size, shuffle, drop_last):
"""Constructs the data loader for the given dataset."""
assert dataset_name in _DATASET_CATALOG.keys(), \
'Dataset \'{}\' not supported'.format(dataset_name)
assert dp.has_data_path(dataset_name), \
'Dataset \'{}\' has no data path'.format(dataset_name)
# Retrieve the data path for the dataset
data_path = dp.get_data_path(dataset_name)
# Construct the dataset
dataset = _DATASET_CATALOG[dataset_name](data_path, split, batch_size)
# Create a sampler for multi-process training
sampler = DistributedSampler(dataset) if cfg.NUM_GPUS > 1 else None
# Create a loader
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=(False if sampler else shuffle),
sampler=sampler,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=drop_last
)
return loader
def construct_train_loader():
"""Train loader wrapper."""
return _construct_loader(
dataset_name=cfg.TRAIN.DATASET,
split=cfg.TRAIN.SPLIT,
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
drop_last=True
)
def construct_test_loader():
"""Test loader wrapper."""
return _construct_loader(
dataset_name=cfg.TEST.DATASET,
split=cfg.TEST.SPLIT,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
drop_last=False
)
def construct_test_loader_adv():
"""Test loader wrapper."""
return _construct_loader(
dataset_name=cfg.TEST.DATASET,
split=cfg.TEST.SPLIT,
batch_size=1,
shuffle=False,
drop_last=False
)
def shuffle(loader, cur_epoch):
""""Shuffles the data."""
assert isinstance(loader.sampler, (RandomSampler, DistributedSampler)), \
'Sampler type \'{}\' not supported'.format(type(loader.sampler))
# RandomSampler handles shuffling automatically
if isinstance(loader.sampler, DistributedSampler):
# DistributedSampler shuffles data based on epoch
loader.sampler.set_epoch(cur_epoch)
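# A minimal usage sketch, assuming cfg.TRAIN.* / cfg.TEST.* and the dataset
# paths are already configured. With cfg.NUM_GPUS > 1 the loaders use a
# DistributedSampler, which is why shuffle() must be called at every epoch.
def _demo_loaders(num_epochs=2):
    """Builds the loaders and reshuffles the train loader each epoch."""
    train_loader = construct_train_loader()
    test_loader = construct_test_loader()
    for cur_epoch in range(num_epochs):
        shuffle(train_loader, cur_epoch)
        for inputs, labels in train_loader:
            break  # one minibatch per epoch is enough for this sketch
    return len(train_loader), len(test_loader)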
| 3,131 | 30.009901 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/imagenet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""ImageNet dataset."""
import cv2
import numpy as np
import os
import torch
import torch.utils.data
import pycls.datasets.transforms as transforms
import pycls.utils.logging as lu
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [0.406, 0.456, 0.485]
_SD = [0.225, 0.224, 0.229]
# Eig vals and vecs of the cov mat
_EIG_VALS = [0.2175, 0.0188, 0.0045]
_EIG_VECS = np.array([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]
])
class ImageNet(torch.utils.data.Dataset):
"""ImageNet dataset."""
def __init__(self, data_path, split, batch_size):
assert os.path.exists(data_path), \
'Data path \'{}\' not found'.format(data_path)
assert split in ['train', 'val'], \
'Split \'{}\' not supported for ImageNet'.format(split)
logger.info('Constructing ImageNet {}...'.format(split))
self._data_path = data_path
self._split = split
self._batch_size = batch_size
self._construct_imdb()
def _construct_imdb(self):
"""Constructs the imdb."""
# Compile the split data path
split_path = os.path.join(self._data_path, self._split)
logger.info('{} data path: {}'.format(self._split, split_path))
# Map ImageNet class ids to contiguous ids
self._class_ids = os.listdir(split_path)
self._class_id_cont_id = {v: i for i, v in enumerate(self._class_ids)}
# Construct the image db
self._imdb = []
for class_id in self._class_ids:
cont_id = self._class_id_cont_id[class_id]
im_dir = os.path.join(split_path, class_id)
for im_name in os.listdir(im_dir):
self._imdb.append({
'im_path': os.path.join(im_dir, im_name),
'class': cont_id,
})
logger.info('Number of images: {}'.format(len(self._imdb)))
logger.info('Number of classes: {}'.format(len(self._class_ids)))
def _prepare_im(self, im):
"""Prepares the image for network input."""
# Train and test setups differ
if self._split == 'train':
# Scale and aspect ratio
im = transforms.random_sized_crop(
image=im, size=224, area_frac=0.08
)
# Horizontal flip
im = transforms.horizontal_flip(image=im, prob=0.5, order='HWC')
else:
# Scale and center crop
im = transforms.scale(256, im)
im = transforms.center_crop(224, im)
# HWC -> CHW
im = transforms.HWC2CHW(im)
# [0, 255] -> [0, 1]
im = im / 255.0
# PCA jitter
if self._split == 'train':
im = transforms.lighting(im, 0.1, _EIG_VALS, _EIG_VECS)
# Color normalization
if self._batch_size != 1:
im = transforms.color_normalization(im, _MEAN, _SD)
return im
def __getitem__(self, index):
# Load the image
im = cv2.imread(self._imdb[index]['im_path'])
im = im.astype(np.float32, copy=False)
# Prepare the image for training / testing
im = self._prepare_im(im)
# Retrieve the label
label = self._imdb[index]['class']
return im, label
def __len__(self):
return len(self._imdb)
| 6,759 | 35.344086 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/transforms.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Image transformations."""
import cv2
import math
import numpy as np
def CHW2HWC(image):
return image.transpose([1, 2, 0])
def HWC2CHW(image):
return image.transpose([2, 0, 1])
def color_normalization(image, mean, std):
"""Expects image in CHW format."""
assert len(mean) == image.shape[0]
assert len(std) == image.shape[0]
for i in range(image.shape[0]):
image[i] = image[i] - mean[i]
image[i] = image[i] / std[i]
return image
def zero_pad(image, pad_size, order='CHW'):
assert order in ['CHW', 'HWC']
if order == 'CHW':
pad_width = ((0, 0), (pad_size, pad_size), (pad_size, pad_size))
else:
pad_width = ((pad_size, pad_size), (pad_size, pad_size), (0, 0))
return np.pad(image, pad_width, mode='constant')
def horizontal_flip(image, prob, order='CHW'):
assert order in ['CHW', 'HWC']
if np.random.uniform() < prob:
if order == 'CHW':
image = image[:, :, ::-1]
else:
image = image[:, ::-1, :]
return image
def random_crop(image, size, pad_size=0, order='CHW'):
assert order in ['CHW', 'HWC']
if pad_size > 0:
image = zero_pad(image=image, pad_size=pad_size, order=order)
if order == 'CHW':
if image.shape[1] == size and image.shape[2] == size:
return image
height = image.shape[1]
width = image.shape[2]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = image[:, y_offset:y_offset + size, x_offset:x_offset + size]
assert cropped.shape[1] == size, "Image not cropped properly"
assert cropped.shape[2] == size, "Image not cropped properly"
else:
if image.shape[0] == size and image.shape[1] == size:
return image
height = image.shape[0]
width = image.shape[1]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = image[y_offset:y_offset + size, x_offset:x_offset + size, :]
assert cropped.shape[0] == size, "Image not cropped properly"
assert cropped.shape[1] == size, "Image not cropped properly"
return cropped
def scale(size, image):
height = image.shape[0]
width = image.shape[1]
if ((width <= height and width == size) or
(height <= width and height == size)):
return image
new_width = size
new_height = size
if width < height:
new_height = int(math.floor((float(height) / width) * size))
else:
new_width = int(math.floor((float(width) / height) * size))
img = cv2.resize(
image,
(new_width, new_height),
interpolation=cv2.INTER_LINEAR
)
return img.astype(np.float32)
def center_crop(size, image):
height = image.shape[0]
width = image.shape[1]
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
cropped = image[y_offset:y_offset + size, x_offset:x_offset + size, :]
assert cropped.shape[0] == size, "Image height not cropped properly"
assert cropped.shape[1] == size, "Image width not cropped properly"
return cropped
def random_sized_crop(image, size, area_frac=0.08):
for _ in range(0, 10):
height = image.shape[0]
width = image.shape[1]
area = height * width
target_area = np.random.uniform(area_frac, 1.0) * area
aspect_ratio = np.random.uniform(3.0 / 4.0, 4.0 / 3.0)
w = int(round(math.sqrt(float(target_area) * aspect_ratio)))
h = int(round(math.sqrt(float(target_area) / aspect_ratio)))
if np.random.uniform() < 0.5:
w, h = h, w
if h <= height and w <= width:
if height == h:
y_offset = 0
else:
y_offset = np.random.randint(0, height - h)
if width == w:
x_offset = 0
else:
x_offset = np.random.randint(0, width - w)
y_offset = int(y_offset)
x_offset = int(x_offset)
cropped = image[y_offset:y_offset + h, x_offset:x_offset + w, :]
assert cropped.shape[0] == h and cropped.shape[1] == w, \
"Wrong crop size"
cropped = cv2.resize(
cropped,
(size, size),
interpolation=cv2.INTER_LINEAR
)
return cropped.astype(np.float32)
return center_crop(size, scale(size, image))
def lighting(img, alphastd, eigval, eigvec):
if alphastd == 0:
return img
# generate alpha1, alpha2, alpha3
alpha = np.random.normal(0, alphastd, size=(1, 3))
eig_vec = np.array(eigvec)
eig_val = np.reshape(eigval, (1, 3))
rgb = np.sum(
eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
axis=1
)
for idx in range(img.shape[0]):
img[idx] = img[idx] + rgb[2 - idx]
return img
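# A minimal usage sketch of the CIFAR-style train pipeline on a dummy CHW
# image; the mean/SD values below are placeholders, not dataset statistics.
def _demo_train_transforms():
    """Applies flip, padded random crop, and color normalization."""
    image = np.random.uniform(0, 255, size=(3, 32, 32)).astype(np.float32)
    image = horizontal_flip(image, prob=0.5)
    image = random_crop(image, size=32, pad_size=4)
    image = color_normalization(image, [125.0, 123.0, 114.0], [63.0, 62.0, 67.0])
    return image.shape  # still (3, 32, 32)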
| 5,563 | 32.119048 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/load_graph.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""load bio neural networks"""
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from networkx.utils import py_random_state
from matplotlib.colors import ListedColormap
import pdb
def compute_stats(G):
G_cluster = sorted(list(nx.clustering(G).values()))
cluster = sum(G_cluster) / len(G_cluster)
path = nx.average_shortest_path_length(G) # path
return cluster, path
def plot_graph(graph, name, dpi=200, width=0.5, layout='spring'):
plt.figure(figsize=(10, 10))
pos = nx.spiral_layout(graph)
if layout == 'spring':
pos = nx.spring_layout(graph)
elif layout == 'circular':
pos = nx.circular_layout(graph)
nx.draw(graph, pos=pos, node_size=100, width=width)
plt.savefig('figs/graph_view_{}.png'.format(name), dpi=dpi, transparent=True)
def load_graph(name, verbose=False, seed=1):
if 'raw' in name:
name = name[:-3]
directed = True
else:
directed = False
filename = '{}.txt'.format(name)
# filename = 'pycls/datasets/{}.txt'.format(name)
with open(filename) as f:
content = f.readlines()
content = [list(x.strip()) for x in content]
adj = np.array(content).astype(int)
if not directed:
adj = np.logical_or(adj.transpose(), adj).astype(int)
graph = nx.from_numpy_array(adj, create_using=nx.DiGraph)
if verbose:
print(type(graph))
print(graph.number_of_nodes(), graph.number_of_edges())
print(compute_stats(graph))
print(len(graph.edges))
# plot_graph(graph, 'mc_whole', dpi=60, width=1, layout='circular')
cmap = ListedColormap(['w', 'k'])
plt.matshow(nx.to_numpy_matrix(graph), cmap=cmap)
plt.show()
return graph
def compute_count(channel, group):
divide = channel // group
remain = channel % group
out = np.zeros(group, dtype=int)
out[:remain] = divide + 1
out[remain:] = divide
return out
@py_random_state(3)
def ws_graph(n, k, p, seed=1):
"""Returns a ws-flex graph, k can be real number in [2,n]
"""
assert k >= 2 and k <= n
# compute number of edges:
edge_num = int(round(k * n / 2))
count = compute_count(edge_num, n)
# print(count)
G = nx.Graph()
for i in range(n):
source = [i] * count[i]
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
# print(source, target)
G.add_edges_from(zip(source, target))
# rewire edges from each node
nodes = list(G.nodes())
for i in range(n):
u = i
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
for v in target:
if seed.random() < p:
w = seed.choice(nodes)
# Enforce no self-loops or multiple edges
while w == u or G.has_edge(u, w):
w = seed.choice(nodes)
if G.degree(u) >= n - 1:
break # skip this rewiring
else:
G.remove_edge(u, v)
G.add_edge(u, w)
return G
@py_random_state(4)
def connected_ws_graph(n, k, p, tries=100, seed=1):
"""Returns a connected ws-flex graph.
"""
for i in range(tries):
# seed is an RNG so should change sequence each call
G = ws_graph(n, k, p, seed)
if nx.is_connected(G):
return G
raise nx.NetworkXError('Maximum number of tries exceeded')
def generate_graph(message_type='ws', n=16, sparsity=0.5, p=0.2,
directed=False, seed=123):
### for relaxed ws
degree = n * sparsity
if message_type == 'ws':
graph = connected_ws_graph(n=n, k=degree, p=p, seed=seed)
return graph
# graph = load_graph('mcwhole', True)
# graph = load_graph('mcwholeraw', True)
# graph = load_graph('mcvisual', True)
# graph = load_graph('mcvisualraw', True)
# graph = load_graph('cat', True)
# graph = load_graph('catraw', True)
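# A minimal usage sketch: generate a WS-flex graph (the relaxed
# Watts-Strogatz family used for relational graphs) and report its
# clustering coefficient and average shortest-path length.
def _demo_ws_flex():
    """Builds a small connected ws-flex graph and computes its stats."""
    graph = generate_graph(message_type='ws', n=16, sparsity=0.5, p=0.2, seed=123)
    cluster, path = compute_stats(graph)
    return graph.number_of_nodes(), graph.number_of_edges(), cluster, path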
| 4,341 | 30.014286 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/checkpoint.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Functions that handle saving and loading of checkpoints."""
import os
import torch
from collections import OrderedDict
from pycls.config import cfg
import pycls.utils.distributed as du
# Common prefix for checkpoint file names
_NAME_PREFIX = 'model_epoch_'
# Checkpoints directory name
_DIR_NAME = 'checkpoints'
def get_checkpoint_dir():
"""Get location for storing checkpoints."""
return os.path.join(cfg.OUT_DIR, _DIR_NAME)
def got_checkpoint_dir():
"""Get location for storing checkpoints for inference time."""
return os.path.join(cfg.CHECKPT_DIR, _DIR_NAME)
def get_checkpoint(epoch):
"""Get the full path to a checkpoint file."""
name = '{}{:04d}.pyth'.format(_NAME_PREFIX, epoch)
return os.path.join(get_checkpoint_dir(), name)
def got_checkpoint(epoch):
"""Get the full path to a checkpoint file for inference time."""
name = '{}{:04d}.pyth'.format(_NAME_PREFIX, epoch)
return os.path.join(got_checkpoint_dir(), name)
def get_checkpoint_last():
d = get_checkpoint_dir()
names = os.listdir(d) if os.path.exists(d) else []
names = [f for f in names if _NAME_PREFIX in f]
assert len(names), 'No checkpoints found in \'{}\'.'.format(d)
name = sorted(names)[-1]
return os.path.join(d, name)
def got_checkpoint_last():
d = got_checkpoint_dir()
names = os.listdir(d) if os.path.exists(d) else []
names = [f for f in names if _NAME_PREFIX in f]
assert len(names), 'No checkpoints found in \'{}\'.'.format(d)
name = sorted(names)[-1]
return os.path.join(d, name)
def has_checkpoint():
"""Determines if the given directory contains a checkpoint."""
d = get_checkpoint_dir()
print("checkpoint directory =", d)
files = os.listdir(d) if os.path.exists(d) else []
return any(_NAME_PREFIX in f for f in files)
def had_checkpoint():
"""Determines if the given directory contains a checkpoint for inference time."""
d = got_checkpoint_dir()
print("checkpoint directory =", d)
files = os.listdir(d) if os.path.exists(d) else []
return any(_NAME_PREFIX in f for f in files)
def is_checkpoint_epoch(cur_epoch):
"""Determines if a checkpoint should be saved on current epoch."""
return (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0
def save_checkpoint(model, optimizer, epoch):
"""Saves a checkpoint."""
# Save checkpoints only from the master process
if not du.is_master_proc():
return
os.makedirs(get_checkpoint_dir(), exist_ok=True)
checkpoint = {
'epoch': epoch,
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
'cfg': cfg.dump()
}
checkpoint_file = get_checkpoint(epoch + 1)
torch.save(checkpoint, checkpoint_file)
return checkpoint_file
def load_checkpoint(checkpoint_file, model, optimizer=None):
"""Loads the checkpoint from the given file."""
assert os.path.exists(checkpoint_file), \
'Checkpoint \'{}\' not found'.format(checkpoint_file)
# if cfg.IS_INFERENCE and cfg.IS_DDP:
# state_dict = torch.load(checkpoint_file, map_location='cpu')
# new_state_dict = OrderedDict()
# print("state_dict.items() :", state_dict)
# for k, v in state_dict.items():
# name = k[7:] # remove `module.`
# new_state_dict[name] = v
# # load params
# epoch = state_dict['epoch']
# model.load_state_dict(new_state_dict['model_state'])
# if optimizer:
# optimizer.load_state_dict(new_state_dict['optimizer_state'])
if cfg.IS_INFERENCE:
print("Mapping model to CPU")
checkpoint = torch.load(checkpoint_file, map_location='cpu')
# print(checkpoint)
else:
checkpoint = torch.load(checkpoint_file)
epoch = checkpoint['epoch']
print("Epochs from checkpoint = ", epoch)
model.load_state_dict(checkpoint['model_state'], strict=False)
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state'])
return epoch
| 4,392 | 31.540741 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/timer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Timer."""
import time
class Timer(object):
"""A simple timer (adapted from Detectron)."""
def __init__(self):
self.reset()
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
def reset(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
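# A minimal usage sketch of the tic/toc pattern the training loop relies on.
def _demo_timer():
    """Times a trivial workload and returns the running average."""
    timer = Timer()
    for _ in range(3):
        timer.tic()
        sum(range(100000))  # stand-in for one training iteration
        timer.toc()
    return timer.average_time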
| 1,013 | 25 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/error_handler.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Multiprocessing error handler."""
import os
import signal
import threading
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and
propagates the tracebacks to the parent process.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, _sig_num, _stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
| 2,012 | 31.467742 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/plotting.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Plotting functions."""
import colorlover as cl
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.offline as offline
import pycls.utils.logging as lu
def get_plot_colors(max_colors, color_format='pyplot'):
"""Generate colors for plotting."""
colors = cl.scales['11']['qual']['Paired']
if max_colors > len(colors):
colors = cl.to_rgb(cl.interp(colors, max_colors))
if color_format == 'pyplot':
return [[j / 255.0 for j in c] for c in cl.to_numeric(colors)]
return colors
def prepare_plot_data(log_files, names, key='top1_err'):
"""Load logs and extract data for plotting error curves."""
plot_data = []
for file, name in zip(log_files, names):
d, log = {}, lu.load_json_stats(file)
for phase in ['train', 'test']:
x = lu.parse_json_stats(log, phase + '_epoch', 'epoch')
y = lu.parse_json_stats(log, phase + '_epoch', key)
d['x_' + phase], d['y_' + phase] = x, y
d[phase + '_label'] = '[{:5.2f}] '.format(min(y) if y else 0) + name
plot_data.append(d)
assert len(plot_data) > 0, 'No data to plot'
return plot_data
def plot_error_curves_plotly(log_files, names, filename, key='top1_err'):
"""Plot error curves using plotly and save to file."""
plot_data = prepare_plot_data(log_files, names, key)
colors = get_plot_colors(len(plot_data), 'plotly')
# Prepare data for plots (3 sets, train duplicated w and w/o legend)
data = []
for i, d in enumerate(plot_data):
s = str(i)
line_train = {'color': colors[i], 'dash': 'dashdot', 'width': 1.5}
line_test = {'color': colors[i], 'dash': 'solid', 'width': 1.5}
data.append(go.Scatter(
x=d['x_train'], y=d['y_train'], mode='lines', name=d['train_label'],
line=line_train, legendgroup=s, visible=True, showlegend=False
))
data.append(go.Scatter(
x=d['x_test'], y=d['y_test'], mode='lines', name=d['test_label'],
line=line_test, legendgroup=s, visible=True, showlegend=True
))
data.append(go.Scatter(
x=d['x_train'], y=d['y_train'], mode='lines', name=d['train_label'],
line=line_train, legendgroup=s, visible=False, showlegend=True
))
# Prepare layout w ability to toggle 'all', 'train', 'test'
titlefont = {'size': 18, 'color': '#7f7f7f'}
vis = [[True, True, False], [False, False, True], [False, True, False]]
buttons = zip(['all', 'train', 'test'], [[{'visible': v}] for v in vis])
buttons = [{'label': l, 'args': v, 'method': 'update'} for l, v in buttons]
layout = go.Layout(
title=key + ' vs. epoch<br>[dash=train, solid=test]',
xaxis={'title': 'epoch', 'titlefont': titlefont},
yaxis={'title': key, 'titlefont': titlefont},
showlegend=True,
hoverlabel={'namelength': -1},
updatemenus=[{
'buttons': buttons, 'direction': 'down', 'showactive': True,
'x': 1.02, 'xanchor': 'left', 'y': 1.08, 'yanchor': 'top'
}]
)
# Create plotly plot
offline.plot({'data': data, 'layout': layout}, filename=filename)
def plot_error_curves_pyplot(log_files, names, filename=None, key='top1_err'):
"""Plot error curves using matplotlib.pyplot and save to file."""
plot_data = prepare_plot_data(log_files, names, key)
colors = get_plot_colors(len(names))
for ind, d in enumerate(plot_data):
c, lbl = colors[ind], d['test_label']
plt.plot(d['x_train'], d['y_train'], '--', c=c, alpha=0.8)
plt.plot(d['x_test'], d['y_test'], '-', c=c, alpha=0.8, label=lbl)
plt.title(key + ' vs. epoch\n[dash=train, solid=test]', fontsize=14)
plt.xlabel('epoch', fontsize=14)
plt.ylabel(key, fontsize=14)
plt.grid(alpha=0.4)
plt.legend()
if filename:
plt.savefig(filename)
plt.clf()
else:
plt.show()
| 4,288 | 39.847619 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/logging.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Logging."""
import builtins
import decimal
import logging
import os
import simplejson
import sys
from pycls.config import cfg
import pycls.utils.distributed as du
import pycls.utils.metrics as mu
import pdb
# Show filename and line number in logs
_FORMAT = '[%(filename)s: %(lineno)3d]: %(message)s'
# Log file name (for cfg.LOG_DEST = 'file')
_LOG_FILE = 'stdout.log'
# Printed json stats lines will be tagged w/ this
_TAG = 'json_stats: '
def _suppress_print():
"""Suppresses printing from the current process."""
    def ignore(*objects, sep=' ', end='\n', file=sys.stdout, flush=False):
        pass
builtins.print = ignore
def setup_logging():
"""Sets up the logging."""
# Enable logging only for the master process
if du.is_master_proc():
# Clear the root logger to prevent any existing logging config
# (e.g. set by another module) from messing with our setup
logging.root.handlers = []
# Construct logging configuration
logging_config = {
'level': logging.INFO,
'format': _FORMAT
}
# Log either to stdout or to a file
if cfg.LOG_DEST == 'stdout':
logging_config['stream'] = sys.stdout
else:
logging_config['filename'] = os.path.join(cfg.OUT_DIR, _LOG_FILE)
# Configure logging
logging.basicConfig(**logging_config)
else:
pass
# _suppress_print()
def get_logger(name):
"""Retrieves the logger."""
return logging.getLogger(name)
def log_json_stats(stats, cur_epoch=None, writer=None, is_epoch=False, params=0, flops=0, model=None, is_master=False):
"""Logs json stats."""
if writer is not None:
for k, v in stats.items():
if isinstance(v, float) or isinstance(v, int):
writer.add_scalar(k, v, cur_epoch + 1)
# if model is not None:
# for name, param in model.named_parameters():
# writer.add_histogram(name, param.clone().cpu().data.numpy(), cur_epoch)
# Decimal + string workaround for having fixed len float vals in logs
stats = {
k: decimal.Decimal('{:.6f}'.format(v)) if isinstance(v, float) else v
for k, v in stats.items()
}
json_stats = simplejson.dumps(stats, sort_keys=True, use_decimal=True)
logger = get_logger(__name__)
logger.info('{:s}{:s}'.format(_TAG, json_stats))
if is_epoch and cur_epoch is not None and is_master:
epoch_id = cur_epoch + 1
result_info = ', '.join(
[str(round(params / 1000000, 3)), str(round(flops / 1000000000, 3)), '{:.3f}'.format(stats['time_avg']),
'{:.3f}'.format(stats['top1_err']), '{:.3f}'.format(stats['top5_err']),
str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.DIM_LIST[0]), str(cfg.RGRAPH.SEED_TRAIN)])
with open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, epoch_id), "a") as text_file:
text_file.write(result_info + '\n')
def load_json_stats(log_file):
"""Loads json_stats from a single log file."""
with open(log_file, 'r') as f:
lines = f.readlines()
json_lines = [l[l.find(_TAG) + len(_TAG):] for l in lines if _TAG in l]
json_stats = [simplejson.loads(l) for l in json_lines]
return json_stats
def parse_json_stats(log, row_type, key):
"""Extract values corresponding to row_type/key out of log."""
vals = [row[key] for row in log if row['_type'] == row_type and key in row]
if key == 'iter' or key == 'epoch':
vals = [int(val.split('/')[0]) for val in vals]
return vals
def get_log_files(log_dir, name_filter=''):
"""Get all log files in directory containing subdirs of trained models."""
names = [n for n in sorted(os.listdir(log_dir)) if name_filter in n]
files = [os.path.join(log_dir, n, _LOG_FILE) for n in names]
f_n_ps = [(f, n) for (f, n) in zip(files, names) if os.path.exists(f)]
files, names = zip(*f_n_ps)
return files, names
| 4,325 | 33.608 | 119 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/net.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Functions for manipulating networks."""
import itertools
import math
import torch
import torch.nn as nn
from pycls.config import cfg
from ..models.relation_graph import *
def init_weights(m):
"""Performs ResNet style weight initialization."""
if isinstance(m, nn.Conv2d) or isinstance(m, SymConv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
elif isinstance(m, TalkConv2d):
# Note that there is no bias due to BN
### uniform init
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels * m.params_scale
### node specific init
# fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
# m.weight.data = m.weight.data*m.init_scale
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
zero_init_gamma = (
hasattr(m, 'final_bn') and m.final_bn and
cfg.BN.ZERO_INIT_FINAL_GAMMA
)
m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear) or isinstance(m, TalkLinear) or isinstance(m, SymLinear):
m.weight.data.normal_(mean=0.0, std=0.01)
if m.bias is not None:
m.bias.data.zero_()
@torch.no_grad()
def compute_precise_bn_stats(model, loader):
"""Computes precise BN stats on training data."""
# Compute the number of minibatches to use
num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))
# Retrieve the BN layers
bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]
# Initialize stats storage
mus = [torch.zeros_like(bn.running_mean) for bn in bns]
sqs = [torch.zeros_like(bn.running_var) for bn in bns]
# Remember momentum values
moms = [bn.momentum for bn in bns]
# Disable momentum
for bn in bns:
bn.momentum = 1.0
# Accumulate the stats across the data samples
for inputs, _labels in itertools.islice(loader, num_iter):
model(inputs.cuda())
# Accumulate the stats for each BN layer
for i, bn in enumerate(bns):
m, v = bn.running_mean, bn.running_var
sqs[i] += (v + m * m) / num_iter
mus[i] += m / num_iter
# Set the stats and restore momentum values
for i, bn in enumerate(bns):
bn.running_var = sqs[i] - mus[i] * mus[i]
bn.running_mean = mus[i]
bn.momentum = moms[i]
def get_flat_weights(model):
"""Gets all model weights as a single flat vector."""
return torch.cat([p.data.view(-1, 1) for p in model.parameters()], 0)
def set_flat_weights(model, flat_weights):
"""Sets all model weights from a single flat vector."""
k = 0
for p in model.parameters():
n = p.data.numel()
p.data.copy_(flat_weights[k:(k + n)].view_as(p.data))
k += n
assert k == flat_weights.numel()
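# A minimal usage sketch: round-trip a model's parameters through a flat
# vector, e.g. for weight interpolation or perturbation experiments. The
# tiny MLP below is an arbitrary stand-in.
def _demo_flat_weights():
    """Flattens and restores the weights of a small model."""
    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
    flat = get_flat_weights(model)  # (num_params, 1) column vector
    set_flat_weights(model, flat)   # exact inverse of the flattening
    return flat.numel()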
def model2adj(model):
adj_dict = {}
i = 0
for n, m in model.named_modules():
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
adj_dict['weight_{}'.format(i)] = m.weight.data.squeeze().cpu().numpy()
i += 1
elif isinstance(m, SymLinear):
weight = m.weight.data + m.weight.data.permute(1, 0)
adj_dict['weight_{}'.format(i)] = weight.squeeze().cpu().numpy()
i += 1
elif isinstance(m, SymConv2d):
weight = m.weight.data + m.weight.data.permute(1, 0, 2, 3)
adj_dict['weight_{}'.format(i)] = weight.squeeze().cpu().numpy()
i += 1
elif isinstance(m, TalkLinear) or isinstance(m, TalkConv2d):
adj_dict['weight_{}'.format(i)] = m.weight.data.squeeze().cpu().numpy()
adj_dict['mask_{}'.format(i)] = m.mask.data.squeeze().cpu().numpy()
i += 1
return adj_dict
| 4,360 | 37.59292 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/distributed.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Distributed helpers."""
import torch
from pycls.config import cfg
def is_master_proc():
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints.
In the multi GPU setting, we assign the master role to the rank 0 process.
    When training using a single GPU, there is only one training process,
    which is considered the master process.
"""
return cfg.NUM_GPUS == 1 or torch.distributed.get_rank() == 0
def init_process_group(proc_rank, world_size):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
# print('--rank{},world{}--'.format(proc_rank, world_size))
# torch.distributed.init_process_group(
# backend=cfg.DIST_BACKEND,
# init_method="tcp://{}:{}".format(cfg.HOST, cfg.PORT),
# world_size=world_size,
# rank=proc_rank
# )
torch.distributed.init_process_group(
backend=cfg.DIST_BACKEND,
init_method='env://',
world_size=world_size,
rank=proc_rank
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
def scaled_all_reduce(tensors):
"""Performs the scaled all_reduce operation on the provided tensors.
The input tensors are modified in-place. Currently supports only the sum
reduction operator. The reduced values are scaled by the inverse size of
the process group (equivalent to cfg.NUM_GPUS).
"""
# Queue the reductions
reductions = []
for tensor in tensors:
reduction = torch.distributed.all_reduce(tensor, async_op=True)
reductions.append(reduction)
# Wait for reductions to finish
for reduction in reductions:
reduction.wait()
# Scale the results
for tensor in tensors:
tensor.mul_(1.0 / cfg.NUM_GPUS)
return tensors
| 2,323 | 31.277778 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/metrics.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Functions for computing metrics."""
import numpy as np
import torch
import torch.nn as nn
import pdb
from pycls.config import cfg
from functools import reduce
import operator
from ..models.relation_graph import *
# Number of bytes in a megabyte
_B_IN_MB = 1024 * 1024
def topks_correct(preds, labels, ks):
"""Computes the number of top-k correct predictions for each k."""
assert preds.size(0) == labels.size(0), \
'Batch dim of predictions and labels must match'
# Find the top max_k predictions for each sample
_top_max_k_vals, top_max_k_inds = torch.topk(
preds, max(ks), dim=1, largest=True, sorted=True
)
# (batch_size, max_k) -> (max_k, batch_size)
top_max_k_inds = top_max_k_inds.t()
# (batch_size, ) -> (max_k, batch_size)
rep_max_k_labels = labels.view(1, -1).expand_as(top_max_k_inds)
# (i, j) = 1 if top i-th prediction for the j-th sample is correct
top_max_k_correct = top_max_k_inds.eq(rep_max_k_labels)
# Compute the number of topk correct predictions for each k
topks_correct = [
top_max_k_correct[:k, :].view(-1).float().sum() for k in ks
]
return topks_correct
def topk_errors(preds, labels, ks):
"""Computes the top-k error for each k."""
num_topks_correct = topks_correct(preds, labels, ks)
return [(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct]
def topk_accuracies(preds, labels, ks):
"""Computes the top-k accuracy for each k."""
num_topks_correct = topks_correct(preds, labels, ks)
return [(x / preds.size(0)) * 100.0 for x in num_topks_correct]
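# A minimal worked example: for the two samples below the top-1 prediction
# matches the label in both rows, so the top-1 and top-2 errors are 0%.
def _demo_topk():
    """Checks topks_correct / topk_errors on a tiny batch."""
    preds = torch.tensor([[0.1, 0.7, 0.2],
                          [0.8, 0.1, 0.1]])
    labels = torch.tensor([1, 0])
    correct = topks_correct(preds, labels, ks=[1, 2])  # [2.0, 2.0]
    errors = topk_errors(preds, labels, ks=[1, 2])     # [0.0, 0.0]
    return correct, errors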
def params_count(model):
"""Computes the number of parameters."""
count = 0
for n,m in model.named_modules():
if isinstance(m, TalkConv2d) or isinstance(m, TalkLinear):
count += np.sum([p.numel()*m.params_scale for p in m.parameters(recurse=False)]).item()
else:
count += np.sum([p.numel() for p in m.parameters(recurse=False)]).item()
return int(count)
def flops_count(model):
"""Computes the number of flops."""
assert cfg.TRAIN.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Computing flops for {} is not supported'.format(cfg.TRAIN.DATASET)
# im_size = 32 if cfg.TRAIN.DATASET == 'cifar10' else 224
if cfg.TRAIN.DATASET == 'cifar10':
im_size = 32
elif cfg.TRAIN.DATASET == 'cifar100':
im_size = 32
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
im_size = 64
else:
im_size = 224
h, w = im_size, im_size
count = 0
for n, m in model.named_modules():
if isinstance(m, nn.Conv2d):
if '.se' in n:
count += m.in_channels * m.out_channels + m.bias.numel()
continue
h_out = (h + 2 * m.padding[0] - m.kernel_size[0]) // m.stride[0] + 1
w_out = (w + 2 * m.padding[1] - m.kernel_size[1]) // m.stride[1] + 1
count += np.prod([
m.weight.numel(),
h_out, w_out
])
if 'proj' not in n:
h, w = h_out, w_out
elif isinstance(m, TalkConv2d):
h_out = (h + 2 * m.padding[0] - m.kernel_size[0]) // m.stride[0] + 1
w_out = (w + 2 * m.padding[1] - m.kernel_size[1]) // m.stride[1] + 1
count += int(np.prod([
m.weight.numel()*m.flops_scale,
h_out, w_out
]))
if 'proj' not in n and 'pool' not in n:
h, w = h_out, w_out
elif isinstance(m, nn.MaxPool2d):
h = (h + 2 * m.padding - m.kernel_size) // m.stride + 1
w = (w + 2 * m.padding - m.kernel_size) // m.stride + 1
elif isinstance(m, TalkLinear):
count += int(m.in_features * m.out_features * m.flops_scale)
elif isinstance(m, nn.Linear):
count += m.in_features * m.out_features
return count
def gpu_mem_usage():
"""Computes the GPU memory usage for the current device (MB)."""
mem_usage_bytes = torch.cuda.max_memory_allocated()
return mem_usage_bytes / _B_IN_MB
# Online FLOPs/Params calculation from CondenseNet codebase
count_ops = 0
count_params = 0
def get_num_gen(gen):
return sum(1 for x in gen)
def is_pruned(layer):
try:
layer.mask
return True
except AttributeError:
return False
def is_leaf(model):
return get_num_gen(model.children()) == 0
def get_layer_info(layer):
layer_str = str(layer)
type_name = layer_str[:layer_str.find('(')].strip()
return type_name
def get_layer_param(model):
return sum([reduce(operator.mul, i.size(), 1) for i in model.parameters()])
### The input batch size should be 1 to call this function
def measure_layer(layer, x):
global count_ops, count_params
delta_ops = 0
delta_params = 0
multi_add = 1
type_name = get_layer_info(layer)
### ops_conv
if type_name in ['Conv2d']:
out_h = int((x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0]) /
layer.stride[0] + 1)
out_w = int((x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1]) /
layer.stride[1] + 1)
delta_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * \
layer.kernel_size[1] * out_h * out_w / layer.groups * multi_add
print(layer)
print('out_h: ', out_h, 'out_w:', out_w)
delta_params = get_layer_param(layer)
### ops_nonlinearity
elif type_name in ['ReLU']:
delta_ops = x.numel()
delta_params = get_layer_param(layer)
### ops_pooling
elif type_name in ['AvgPool2d', 'MaxPool2d']:
in_w = x.size()[2]
kernel_ops = layer.kernel_size * layer.kernel_size
out_w = int((in_w + 2 * layer.padding - layer.kernel_size) / layer.stride + 1)
out_h = int((in_w + 2 * layer.padding - layer.kernel_size) / layer.stride + 1)
delta_ops = x.size()[0] * x.size()[1] * out_w * out_h * kernel_ops
delta_params = get_layer_param(layer)
elif type_name in ['AdaptiveAvgPool2d']:
delta_ops = x.size()[0] * x.size()[1] * x.size()[2] * x.size()[3]
delta_params = get_layer_param(layer)
### ops_linear
elif type_name in ['Linear']:
weight_ops = layer.weight.numel() * multi_add
bias_ops = layer.bias.numel()
delta_ops = x.size()[0] * (weight_ops + bias_ops)
delta_params = get_layer_param(layer)
elif type_name in ['WeightedSumTransform']:
weight_ops = layer.weight.numel() * multi_add
delta_ops = x.size()[0] * (weight_ops)
delta_params = get_layer_param(layer)
### ops_nothing
elif type_name in ['BatchNorm2d', 'Dropout2d', 'DropChannel', 'Dropout', 'Sigmoid', 'DirichletWeightedSumTransform', 'Softmax', 'Identity', 'Sequential']:
delta_params = get_layer_param(layer)
### unknown layer type
else:
raise TypeError('unknown layer type: %s' % type_name)
count_ops += delta_ops
count_params += delta_params
return
def measure_model(model, H, W):
global count_ops, count_params
count_ops = 0
count_params = 0
data = torch.zeros(1, 3, H, W).cuda()
def should_measure(x):
return is_leaf(x) or is_pruned(x)
def modify_forward(model):
for child in model.children():
if should_measure(child):
def new_forward(m):
def lambda_forward(x):
measure_layer(m, x)
return m.old_forward(x)
return lambda_forward
child.old_forward = child.forward
child.forward = new_forward(child)
else:
modify_forward(child)
def restore_forward(model):
for child in model.children():
# leaf node
if is_leaf(child) and hasattr(child, 'old_forward'):
child.forward = child.old_forward
child.old_forward = None
else:
restore_forward(child)
modify_forward(model)
model.forward(data)
restore_forward(model)
return count_ops, count_params
| 8,557 | 33.095618 | 158 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/multiprocessing.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Multiprocessing helpers."""
import multiprocessing as mp
import traceback
import subprocess
import numpy as np
import os
from pycls.utils.error_handler import ErrorHandler
import pycls.utils.distributed as du
def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs):
    """Runs a function from a child process."""
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12112'
    # print("--proc_rank{}, world_size{}--".format(proc_rank, world_size))
try:
# Initialize the process group
du.init_process_group(proc_rank, world_size)
# Run the function
fun(*fun_args, **fun_kwargs)
except KeyboardInterrupt:
# Killed by the parent process
pass
except Exception:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
# Destroy the process group
du.destroy_process_group()
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs={}):
"""Runs a function in a multi-proc setting."""
# Handle errors from training subprocesses
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = mp.Process(
target=run,
args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
# get gpu usage
def get_gpu_memory_map():
"""Get the current gpu usage.
Returns
-------
usage: dict
Keys are device ids as integers.
Values are memory usage as integers in MB.
"""
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.used',
'--format=csv,nounits,noheader'
], encoding='utf-8')
# Convert lines into a dictionary
gpu_memory = np.array([int(x) for x in result.strip().split('\n')])
return gpu_memory
def auto_select_gpu(memory_threshold=7000, smooth_ratio=200):
gpu_memory_raw = get_gpu_memory_map() + 10
gpu_memory = gpu_memory_raw / smooth_ratio
gpu_memory = gpu_memory.sum() / (gpu_memory + 10)
gpu_memory[gpu_memory_raw > memory_threshold] = 0
gpu_prob = gpu_memory / gpu_memory.sum()
cuda = str(np.random.choice(len(gpu_prob), p=gpu_prob))
print('GPU select prob: {}, Select GPU {}'.format(gpu_prob, cuda))
return cuda
| 2,888 | 29.09375 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/lr_policy.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Learning rate policies."""
import numpy as np
from pycls.config import cfg
def lr_fun_steps(cur_epoch):
"""Steps schedule (cfg.OPTIM.LR_POLICY = 'steps')."""
ind = [i for i, s in enumerate(cfg.OPTIM.STEPS) if cur_epoch >= s][-1]
return cfg.OPTIM.BASE_LR * (cfg.OPTIM.LR_MULT ** ind)
def lr_fun_exp(cur_epoch):
"""Exponential schedule (cfg.OPTIM.LR_POLICY = 'exp')."""
return cfg.OPTIM.BASE_LR * (cfg.OPTIM.GAMMA ** cur_epoch)
def lr_fun_cos(cur_epoch):
"""Cosine schedule (cfg.OPTIM.LR_POLICY = 'cos')."""
base_lr, max_epoch = cfg.OPTIM.BASE_LR, cfg.OPTIM.MAX_EPOCH
return 0.5 * base_lr * (1.0 + np.cos(np.pi * cur_epoch / max_epoch))
def get_lr_fun():
"""Retrieves the specified lr policy function"""
lr_fun = 'lr_fun_' + cfg.OPTIM.LR_POLICY
if lr_fun not in globals():
raise NotImplementedError('Unknown LR policy:' + cfg.OPTIM.LR_POLICY)
return globals()[lr_fun]
def get_epoch_lr(cur_epoch):
"""Retrieves the lr for the given epoch according to the policy."""
lr = get_lr_fun()(cur_epoch)
# Linear warmup
if cur_epoch < cfg.OPTIM.WARMUP_EPOCHS:
alpha = cur_epoch / cfg.OPTIM.WARMUP_EPOCHS
warmup_factor = cfg.OPTIM.WARMUP_FACTOR * (1.0 - alpha) + alpha
lr *= warmup_factor
return lr
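# A minimal usage sketch, assuming cfg.OPTIM.* is already configured. With
# the 'cos' policy the schedule follows
# 0.5 * BASE_LR * (1 + cos(pi * epoch / MAX_EPOCH)), scaled by the linear
# warmup factor during the first WARMUP_EPOCHS epochs.
def _demo_lr_schedule():
    """Returns the learning rate used at each epoch of the run."""
    return [get_epoch_lr(epoch) for epoch in range(cfg.OPTIM.MAX_EPOCH)]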
| 1,643 | 31.235294 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/meters.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Meters."""
from collections import deque
import datetime
import numpy as np
from pycls.config import cfg
from pycls.utils.timer import Timer
import pycls.utils.logging as lu
import pycls.utils.metrics as metrics
def eta_str(eta_td):
"""Converts an eta timedelta to a fixed-width string format."""
days = eta_td.days
hrs, rem = divmod(eta_td.seconds, 3600)
mins, secs = divmod(rem, 60)
return '{0:02},{1:02}:{2:02}:{3:02}'.format(days, hrs, mins, secs)
class ScalarMeter(object):
"""Measures a scalar value (adapted from Detectron)."""
def __init__(self, window_size):
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
def reset(self):
self.deque.clear()
self.total = 0.0
self.count = 0
def add_value(self, value):
self.deque.append(value)
self.count += 1
self.total += value
def get_win_median(self):
return np.median(self.deque)
def get_win_avg(self):
return np.mean(self.deque)
def get_global_avg(self):
return self.total / self.count
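# A minimal usage sketch: ScalarMeter keeps a sliding window for smoothed
# logging alongside global totals.
def _demo_scalar_meter():
    """Feeds a few values and contrasts windowed vs. global averages."""
    meter = ScalarMeter(window_size=2)
    for v in [1.0, 2.0, 3.0]:
        meter.add_value(v)
    return meter.get_win_avg(), meter.get_global_avg()  # 2.5 vs. 2.0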


class TrainMeter(object):
    """Measures training stats."""

    def __init__(self, epoch_iters):
        self.epoch_iters = epoch_iters
        self.max_iter = cfg.OPTIM.MAX_EPOCH * epoch_iters
        self.iter_timer = Timer()
        self.loss = ScalarMeter(cfg.LOG_PERIOD)
        self.loss_total = 0.0
        self.lr = None
        # Current minibatch errors (smoothed over a window)
        self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
        self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
        # Number of misclassified examples
        self.num_top1_mis = 0
        self.num_top5_mis = 0
        self.num_samples = 0

    def reset(self, timer=False):
        if timer:
            self.iter_timer.reset()
        self.loss.reset()
        self.loss_total = 0.0
        self.lr = None
        self.mb_top1_err.reset()
        self.mb_top5_err.reset()
        self.num_top1_mis = 0
        self.num_top5_mis = 0
        self.num_samples = 0

    def iter_tic(self):
        self.iter_timer.tic()

    def iter_toc(self):
        self.iter_timer.toc()

    def update_stats(self, top1_err, top5_err, loss, lr, mb_size):
        # Current minibatch stats
        self.mb_top1_err.add_value(top1_err)
        self.mb_top5_err.add_value(top5_err)
        self.loss.add_value(loss)
        self.lr = lr
        # Aggregate stats
        self.num_top1_mis += top1_err * mb_size
        self.num_top5_mis += top5_err * mb_size
        self.loss_total += loss * mb_size
        self.num_samples += mb_size

    def get_iter_stats(self, cur_epoch, cur_iter):
        eta_sec = self.iter_timer.average_time * (
            self.max_iter - (cur_epoch * self.epoch_iters + cur_iter + 1)
        )
        eta_td = datetime.timedelta(seconds=int(eta_sec))
        mem_usage = metrics.gpu_mem_usage()
        stats = {
            '_type': 'train_iter',
            'epoch': '{}/{}'.format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
            'iter': '{}/{}'.format(cur_iter + 1, self.epoch_iters),
            'time_avg': self.iter_timer.average_time,
            'time_diff': self.iter_timer.diff,
            'eta': eta_str(eta_td),
            'top1_err': self.mb_top1_err.get_win_median(),
            'top5_err': self.mb_top5_err.get_win_median(),
            'loss': self.loss.get_win_median(),
            'lr': self.lr,
            'mem': int(np.ceil(mem_usage))
        }
        return stats

    def log_iter_stats(self, cur_epoch, cur_iter):
        if (cur_iter + 1) % cfg.LOG_PERIOD != 0:
            return
        stats = self.get_iter_stats(cur_epoch, cur_iter)
        lu.log_json_stats(stats)

    def get_epoch_stats(self, cur_epoch):
        eta_sec = self.iter_timer.average_time * (
            self.max_iter - (cur_epoch + 1) * self.epoch_iters
        )
        eta_td = datetime.timedelta(seconds=int(eta_sec))
        mem_usage = metrics.gpu_mem_usage()
        top1_err = self.num_top1_mis / self.num_samples
        top5_err = self.num_top5_mis / self.num_samples
        avg_loss = self.loss_total / self.num_samples
        stats = {
            '_type': 'train_epoch',
            'epoch': '{}/{}'.format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
            'time_avg': self.iter_timer.average_time,
            'eta': eta_str(eta_td),
            'top1_err': top1_err,
            'top5_err': top5_err,
            'loss': avg_loss,
            'lr': self.lr,
            'mem': int(np.ceil(mem_usage))
        }
        return stats

    def log_epoch_stats(self, cur_epoch, writer, params=0, flops=0, is_master=False):
        stats = self.get_epoch_stats(cur_epoch)
        lu.log_json_stats(stats, cur_epoch, writer, is_epoch=False, params=params, flops=flops, is_master=is_master)


class TestMeter(object):
    """Measures testing stats."""

    def __init__(self, max_iter):
        self.max_iter = max_iter
        self.iter_timer = Timer()
        # Current minibatch errors (smoothed over a window)
        self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
        self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
        # Min errors (over the full test set)
        self.min_top1_err = 100.0
        self.min_top5_err = 100.0
        # Number of misclassified examples
        self.num_top1_mis = 0
        self.num_top5_mis = 0
        self.num_samples = 0

    def reset(self, min_errs=False):
        if min_errs:
            self.min_top1_err = 100.0
            self.min_top5_err = 100.0
        self.iter_timer.reset()
        self.mb_top1_err.reset()
        self.mb_top5_err.reset()
        self.num_top1_mis = 0
        self.num_top5_mis = 0
        self.num_samples = 0

    def iter_tic(self):
        self.iter_timer.tic()

    def iter_toc(self):
        self.iter_timer.toc()

    def update_stats(self, top1_err, top5_err, mb_size):
        self.mb_top1_err.add_value(top1_err)
        self.mb_top5_err.add_value(top5_err)
        self.num_top1_mis += top1_err * mb_size
        self.num_top5_mis += top5_err * mb_size
        self.num_samples += mb_size

    def get_iter_stats(self, cur_epoch, cur_iter):
        mem_usage = metrics.gpu_mem_usage()
        iter_stats = {
            '_type': 'test_iter',
            'epoch': '{}/{}'.format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
            'iter': '{}/{}'.format(cur_iter + 1, self.max_iter),
            'time_avg': self.iter_timer.average_time,
            'time_diff': self.iter_timer.diff,
            'top1_err': self.mb_top1_err.get_win_median(),
            'top5_err': self.mb_top5_err.get_win_median(),
            'mem': int(np.ceil(mem_usage))
        }
        return iter_stats

    def log_iter_stats(self, cur_epoch, cur_iter):
        if (cur_iter + 1) % cfg.LOG_PERIOD != 0:
            return
        stats = self.get_iter_stats(cur_epoch, cur_iter)
        lu.log_json_stats(stats)

    def get_epoch_stats(self, cur_epoch):
        top1_err = self.num_top1_mis / self.num_samples
        top5_err = self.num_top5_mis / self.num_samples
        self.min_top1_err = min(self.min_top1_err, top1_err)
        self.min_top5_err = min(self.min_top5_err, top5_err)
        mem_usage = metrics.gpu_mem_usage()
        stats = {
            '_type': 'test_epoch',
            'epoch': '{}/{}'.format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
            'time_avg': self.iter_timer.average_time,
            'top1_err': top1_err,
            'top5_err': top5_err,
            'min_top1_err': self.min_top1_err,
            'min_top5_err': self.min_top5_err,
            'mem': int(np.ceil(mem_usage))
        }
        return stats

    def log_epoch_stats(self, cur_epoch, writer, params=0, flops=0, model=None, is_master=False):
        stats = self.get_epoch_stats(cur_epoch)
        lu.log_json_stats(stats, cur_epoch, writer, is_epoch=True, params=params, flops=flops, model=model,
                          is_master=is_master)
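
# A minimal sketch of the intended meter lifecycle (the names 'loader',
# 'train_step', and 'writer' are assumptions, not part of this file; the lr
# comes from the companion lr_policy module):
#
# meter = TrainMeter(epoch_iters=len(loader))
# for cur_epoch in range(cfg.OPTIM.MAX_EPOCH):
#     lr = get_epoch_lr(cur_epoch)
#     for cur_iter, batch in enumerate(loader):
#         meter.iter_tic()
#         top1_err, top5_err, loss = train_step(batch)  # hypothetical step fn
#         meter.iter_toc()
#         meter.update_stats(top1_err, top5_err, loss, lr, mb_size=len(batch))
#         meter.log_iter_stats(cur_epoch, cur_iter)
#     meter.log_epoch_stats(cur_epoch, writer, is_master=True)
#     meter.reset(timer=True)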
| 8,313 | 32.934694 | 116 | py |